diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..56deefdb0fc3824710637f2e6d436d6404c576e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/CMakeLists.txt @@ -0,0 +1,755 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +add_custom_target( + cutlass_test_unit_gemm_device + DEPENDS + cutlass_test_unit_gemm_device_simt + cutlass_test_unit_gemm_device_tensorop_sm70 + cutlass_test_unit_gemm_device_tensorop_sm75 + cutlass_test_unit_gemm_device_tensorop_f16_sm80 + cutlass_test_unit_gemm_device_tensorop_f32_sm80 + cutlass_test_unit_gemm_device_tensorop_f32_tf32_sm80 + cutlass_test_unit_gemm_device_tensorop_f64 + cutlass_test_unit_gemm_device_tensorop_s32_sm80 + cutlass_test_unit_gemm_device_wmma + cutlass_test_unit_gemm_device_tensorop_planar_complex + cutlass_test_unit_gemm_device_sparse_tensorop_sm80 + cutlass_test_unit_gemv_device + cutlass_test_unit_gemm_device_tensorop_sm90 + cutlass_test_unit_gemm_device_tensorop_cluster_multicast_sm90 +) + +add_custom_target( + test_unit_gemm_device + DEPENDS + test_unit_gemm_device_simt + test_unit_gemm_device_tensorop_sm70 + test_unit_gemm_device_tensorop_sm75 + test_unit_gemm_device_tensorop_f16_sm80 + test_unit_gemm_device_tensorop_f32_sm80 + test_unit_gemm_device_tensorop_f32_tf32_sm80 + test_unit_gemm_device_tensorop_f64 + test_unit_gemm_device_tensorop_s32_sm80 + test_unit_gemm_device_wmma + test_unit_gemm_device_tensorop_planar_complex + test_unit_gemm_device_sparse_tensorop_sm80 + test_unit_gemv_device + test_unit_gemm_device_tensorop_sm90 +) + +add_custom_target( + cutlass_test_unit_gemm_device_sm90 + DEPENDS + cutlass_test_unit_gemm_device_tensorop_sm90 + 
cutlass_test_unit_gemm_device_tensorop_cluster_multicast_sm90 +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_simt + + BATCH_SOURCES ON + BATCH_SIZE 4 + + simt_sgemm_nt_sm80.cu + simt_sgemm_tn_sm80.cu + + simt_cgemm_nt_sm80.cu + simt_cgemm_tn_sm80.cu + + simt_f8gemm_tn_sm50.cu + + simt_cgemm_nn_sm50.cu + simt_cgemm_nt_sm50.cu + simt_cgemm_tn_sm50.cu + simt_cgemm_tt_sm50.cu + + simt_qgemm_nn_sm50.cu + simt_qgemm_nt_sm50.cu + simt_qgemm_tn_sm50.cu + simt_qgemm_tt_sm50.cu + + simt_dgemm_nn_sm50.cu + simt_dgemm_nt_sm50.cu + simt_dgemm_tn_sm50.cu + simt_dgemm_tt_sm50.cu + + simt_hgemm_nn_sm50.cu + simt_hgemm_nt_sm50.cu + simt_hgemm_tn_sm50.cu + simt_hgemm_tt_sm50.cu + + simt_igemm_nn_sm50.cu + simt_igemm_nt_sm50.cu + simt_igemm_tn_sm50.cu + simt_igemm_tt_sm50.cu + + simt_int8_igemm_sm61_sliced_k.cu + simt_int8_igemm_sm61.cu + + simt_sgemm_nn_sm50.cu + simt_sgemm_nt_sm50.cu + simt_sgemm_tn_sm50.cu + simt_sgemm_tt_sm50.cu + + simt_zgemm_nn_sm50.cu + simt_zgemm_nt_sm50.cu + simt_zgemm_tn_sm50.cu + simt_zgemm_tt_sm50.cu + + gemm_splitk_simt_sm50.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_simt_3x + + BATCH_SOURCES ON + BATCH_SIZE 4 + + + sm50_gemm_f32_f32_f32_simt.cu + sm80_gemm_f32_f32_f32_simt.cu + sm50_gemm_f64_f64_f64_simt.cu + sm80_gemm_f64_f64_f64_simt.cu + sm61_gemm_s8_s8_s32_simt.cu +) + + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_sm70 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + + gemm_f16n_f16n_f32t_volta_tensor_op_f32_sm70.cu + gemm_f16n_f16t_f32t_volta_tensor_op_f32_sm70.cu + gemm_f16t_f16n_f32t_volta_tensor_op_f32_sm70.cu + gemm_f16t_f16t_f32t_volta_tensor_op_f32_sm70.cu + + gemm_f16n_f16n_f16t_volta_tensor_op_f32_sm70.cu + + gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu + gemm_f16t_f16n_f16t_volta_tensor_op_f16_sm70.cu + + gemm_splitk_tensor_op_sm70.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_sm75 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + 
gemm_universal_f16n_f16t_f32n_tensor_op_f32_sm75.cu + gemm_universal_f16n_f16t_f32t_tensor_op_f32_sm75.cu + + gemm_f16t_f16n_f16t_tensor_op_f16_sm75.cu + gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu + gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm75.cu + gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm75.cu + + gemm_f16n_f16n_f16t_tensor_op_f32_sm75.cu + + gemm_f16n_f16n_f32t_tensor_op_f32_sm75.cu + gemm_f16n_f16t_f32t_tensor_op_f32_sm75.cu + gemm_f16t_f16n_f32t_tensor_op_f32_sm75.cu + gemm_f16t_f16t_f32t_tensor_op_f32_sm75.cu + + gemm_f16n_f16n_f32n_tensor_op_f32_sm75.cu + gemm_f16t_f16t_f32n_tensor_op_f32_sm75.cu + + gemm_s8n_s8t_s8n_tensor_op_s32_sm75.cu + gemm_s8t_s8n_s32t_tensor_op_s32_sm75.cu + gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu + gemm_s8t_s8n_s8t_tensor_op_s32_sm75.cu + gemm_s8t_s8n_s8n_tensor_op_s32_sm75.cu + + gemm_s4n_s4t_s4n_tensor_op_s32_sm75.cu + gemm_s4t_s4n_s32t_tensor_op_s32_sm75.cu + gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu + gemm_s4t_s4n_s4n_tensor_op_s32_sm75.cu + gemm_s4t_s4n_s4t_tensor_op_s32_sm75.cu + + gemm_b1t_b1n_s32t_tensor_op_s32_sm75.cu + gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu + + gemm_splitk_serial_tensor_op_sm75.cu + gemm_splitk_tensor_op_sm75.cu + +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_f16_sm80 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm80.cu + gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm80.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_f32_sm80 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_f16n_f16n_f16t_tensor_op_f32_sm80.cu + gemm_f16n_f16n_f32n_tensor_op_f32_sm80.cu + gemm_f16n_f16n_f32t_tensor_op_f32_sm80.cu + gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu + gemm_f16n_f16t_f32t_tensor_op_f32_sm80.cu + gemm_f16t_f16n_f16t_tensor_op_f16_sm80.cu + gemm_f16t_f16n_f32t_tensor_op_f32_sm80.cu + gemm_f16t_f16t_f32n_tensor_op_f32_sm80.cu + gemm_f16t_f16t_f32t_tensor_op_f32_sm80.cu + gemm_bf16n_bf16n_f32t_tensor_op_f32_sm80.cu + 
gemm_bf16t_bf16t_bf16t_tensor_op_f32_sm80.cu + gemm_f16n_f16n_f16n_direct_store_tensor_op_f32_sm80.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_f32_sm80_3x + + sm80_gemm_s8_s8_s32_tensor_op.cu + sm80_gemm_f16_f16_f32_tensor_op_f32.cu + sm80_gemm_tf32_tf32_f32_tensor_op_f32.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_sm90 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + sm90_gemm_f16_f16_f16_tensor_op.cu + sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu + sm90_gemm_s8_s8_s8_tensor_op_s32.cu + sm90_gemm_tf32_tf32_f32_tensor_op_f32.cu + sm90_gemm_f32_f32_f32_tensor_op_f32.cu + sm90_gemm_f8_f8_f32_tensor_op_fp32.cu + sm90_gemm_f8_f8_bf16_tensor_op_fp32.cu + sm90_gemm_f8_f8_f8_tensor_op_fp32.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_sm90_stream_k + + sm90_gemm_stream_k_scheduler.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu + sm90_gemm_f8_f8_f32_tensor_op_f32_cooperative_stream_k.cu +) + +# Alignment tests +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_alignx_sm90 + + BATCH_SOURCES ON + BATCH_SIZE 4 + sm90_gemm_f16_f16_f16_alignx_tensor_op.cu + sm90_gemm_bf16_bf16_bf16_alignx_tensor_op_f32.cu + sm90_gemm_s8_s8_s8_alignx_tensor_op_s32.cu + sm90_gemm_tf32_tf32_f32_alignx_tensor_op_f32.cu +) + +# Fused epilogue tests +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_epilogue_fusion_sm90 + + BATCH_SOURCES ON + BATCH_SIZE 4 + sm90_gemm_f16_f16_f16_tensor_op_f32_tensor_broadcast.cu + sm90_gemm_f32_f32_f32_tensor_op_f32_tensor_broadcast.cu + sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_bias_elementwise.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_load.cu + 
sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_aux_load.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_row_broadcast.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_row_broadcast.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_reduce.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_reduce.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_dag.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_cluster_multicast_sm90 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_unspecialized.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong.cu + sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative.cu + sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_gmma_rs_warpspecialized_sm90 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_f32_tf32_sm80 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_tf32t_tf32n_f32t_tensor_op_f32_sm80.cu + gemm_tf32n_tf32t_f32t_tensor_op_f32_sm80.cu + gemm_tf32n_tf32n_f32t_tensor_op_f32_sm80.cu + gemm_tf32t_tf32t_f32t_tensor_op_f32_sm80.cu + gemm_universal_cf32n_cf32n_cf32n_tensor_op_f32_sm80.cu + gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu + gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32_sm80.cu + + gemm_f32n_f32n_f32t_tensor_op_f32_sm80.cu + gemm_f32n_f32n_f32t_tensor_op_bf16_f32_sm80.cu + + sm80_gemm_f16_f16_f32_tensor_op_f32.cu +) + +cutlass_test_unit_add_executable( + 
cutlass_test_unit_gemm_device_tensorop_f64 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu + gemm_f64t_f64n_f64t_tensor_op_f64_sm80.cu + + gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu + gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu + gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu + gemm_cf64t_cf64n_cf64t_tensor_op_f64_sm80.cu + gemm_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu + gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu + + # SM90 device level tests + gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu + gemm_f64t_f64n_f64t_tensor_op_f64_sm90.cu + + sm80_gemm_f64_f64_f64_tensor_op_f64.cu + + gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm90.cu + gemm_cf64t_cf64n_cf64t_tensor_op_f64_sm90.cu + gemm_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm90.cu + gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm90.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_s32_sm80 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_s8t_s8n_s32t_tensor_op_s32_sm80.cu + gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu + gemm_s8t_s8n_s8n_tensor_op_s32_sm80.cu + gemm_s8t_s8n_s8t_tensor_op_s32_sm80.cu + gemm_s8t_s8n_f16t_tensor_op_s32_sm80.cu + gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu + gemm_s4t_s4n_s32t_tensor_op_s32_sm80.cu + gemm_s4t_s4n_s4n_tensor_op_s32_sm80.cu + gemm_s4t_s4n_s4t_tensor_op_s32_sm80.cu + gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu + gemm_b1t_b1n_s32t_tensor_op_s32_sm80.cu + + gemm_s8n_s8t_s8n_tensor_op_s32_sm80.cu + gemm_s4n_s4t_s4n_tensor_op_s32_sm80.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_wmma + + BATCH_SOURCES ON + BATCH_SIZE 4 + + # wmma floating point tests + gemm_f16t_f16n_f16t_wmma_tensor_op_f16_sm70.cu + gemm_f16n_f16t_f16t_wmma_tensor_op_f16_sm70.cu + gemm_f16t_f16t_f16t_wmma_tensor_op_f16_sm70.cu + gemm_f16n_f16n_f16t_wmma_tensor_op_f16_sm70.cu + gemm_f16t_f16n_f16n_wmma_tensor_op_f16_sm70.cu + gemm_f16n_f16t_f16n_wmma_tensor_op_f16_sm70.cu + 
gemm_f16t_f16t_f16n_wmma_tensor_op_f16_sm70.cu + gemm_f16n_f16n_f16n_wmma_tensor_op_f16_sm70.cu + + gemm_f16t_f16n_f32t_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16t_f32t_wmma_tensor_op_f32_sm70.cu + gemm_f16t_f16t_f32t_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16n_f32t_wmma_tensor_op_f32_sm70.cu + gemm_f16t_f16n_f32n_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16t_f32n_wmma_tensor_op_f32_sm70.cu + gemm_f16t_f16t_f32n_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16n_f32n_wmma_tensor_op_f32_sm70.cu + + gemm_f16t_f16n_f16t_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16t_f16t_wmma_tensor_op_f32_sm70.cu + gemm_f16t_f16t_f16t_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16n_f16t_wmma_tensor_op_f32_sm70.cu + gemm_f16t_f16n_f16n_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16t_f16n_wmma_tensor_op_f32_sm70.cu + gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu + gemm_f16n_f16n_f16n_wmma_tensor_op_f32_sm70.cu + + # wmma int8 tests + gemm_s8t_s8n_s32t_wmma_tensor_op_s32_sm72.cu + gemm_s8t_s8n_s32n_wmma_tensor_op_s32_sm72.cu + + gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu + gemm_s8t_s8n_s8n_wmma_tensor_op_s32_sm72.cu + + # wmma uint8 tests + gemm_u8t_u8n_s32t_wmma_tensor_op_s32_sm72.cu + + # wmma sub byptes (s4 and b1) tests + gemm_s4t_s4n_s32n_wmma_tensor_op_s32_sm75.cu + gemm_s4t_s4n_s32t_wmma_tensor_op_s32_sm75.cu + + gemm_b1t_b1n_s32n_wmma_tensor_op_s32_sm75.cu + gemm_b1t_b1n_s32t_wmma_tensor_op_s32_sm75.cu + + # wmma floating point tests (using singestage pipeline) + gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16_sm70.cu + gemm_f16t_f16n_f16n_singlestage_wmma_tensor_op_f16_sm70.cu + + gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32_sm70.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_tensorop_planar_complex + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_planar_complex_f16_f16_f32_tensor_op_sm70.cu + gemm_planar_complex_f16_f16_f32_tensor_op_sm75.cu + gemm_planar_complex_f16_f16_f32_tensor_op_sm80.cu +) +cutlass_test_unit_add_executable( + 
cutlass_test_unit_gemm_device_grouped + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_grouped_sm80.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_grouped_scheduler + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_grouped_scheduler_sm80.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_grouped_rank_2k_scheduler + + BATCH_SOURCES ON + BATCH_SIZE 4 + + rank_2k_grouped_scheduler_sm80.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_sparse_tensorop_sm80 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemm_f16n_f16n_f16t_tensor_op_f32_sparse_sm80.cu + gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f16n_f16t_f16t_tensor_op_f16_sparse_sm80.cu + gemm_f16n_f16t_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f16t_f16n_f16t_tensor_op_f16_sparse_sm80.cu + gemm_f16t_f16n_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f16t_f16t_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f32t_f32n_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f32n_f32t_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f32t_f32t_f32t_tensor_op_f32_sparse_sm80.cu + gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu + gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu + gemm_s4t_s4n_s32t_tensor_op_s32_sparse_sm80.cu +) + + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemv_device + + BATCH_SOURCES ON + BATCH_SIZE 4 + + gemv.cu +) + +if (NOT CUDA_COMPILER MATCHES "[Cc]lang") + +add_dependencies( + cutlass_test_unit_gemm_device + cutlass_test_unit_gemm_device_gemm_with_fused_epilogue_tensorop + ) + +add_dependencies( + test_unit_gemm_device + test_unit_gemm_device_gemm_with_fused_epilogue_tensorop + ) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_gemm_with_fused_epilogue_tensorop + + gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu + gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu + + gemm_with_reduction_f16t_f16n_f16n_tensorop_f32_sm80.cu +) + +endif() + +if (NOT CUDA_COMPILER MATCHES "[Cc]lang") + +add_dependencies( + 
cutlass_test_unit_gemm_device + cutlass_test_unit_gemm_device_blas3 + ) + +add_dependencies( + test_unit_gemm_device + test_unit_gemm_device_blas3 + ) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_blas3 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + ## SYRK + # Syrk SM80 f64 tests + syrk_f64n_f64t_tensor_op_f64_sm80.cu + syrk_f64t_f64n_tensor_op_f64_sm80.cu + + # Syrk SM80 f32 tests + syrk_tf32n_f32t_tensor_op_f32_sm80.cu + syrk_tf32t_f32t_tensor_op_f32_sm80.cu + syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu + syrk_f32t_f32t_tensor_op_fast_f32_sm80.cu + + # Syrk SM80 complex f64 tests + syrk_cf64n_cf64t_tensor_op_f64_sm80.cu + syrk_cf64n_cf64n_tensor_op_f64_sm80.cu + syrk_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu + + # Syrk SM80 complex f32 tests + syrk_cf32n_cf32t_tensor_op_f32_sm80.cu + syrk_cf32n_cf32n_tensor_op_f32_sm80.cu + syrk_cf32n_cf32t_tensor_op_fast_f32_sm80.cu + syrk_cf32n_cf32n_tensor_op_fast_f32_sm80.cu + + # Syrk SM90 f64 tests + syrk_f64_f64_tensor_op_f64_sm90.cu + + # Syrk SM90 complex f64 tests + syrk_cf64_cf64_tensor_op_f64_sm90.cu + + ## HERK + # Herk SM80 complex f64 tests + herk_cf64h_cf64n_tensor_op_f64_sm80.cu + + # Herk SM80 complex f32 tests + herk_cf32h_cf32n_tensor_op_f32_sm80.cu + herk_cf32h_cf32n_tensor_op_fast_f32_sm80.cu + + # Herk SM90 complex f64 tests + herk_cf64_cf64_tensor_op_f64_sm90.cu + + ## TRMM + # Trmm SM80 f64 tests + trmm_f64n_f64n_f64t_tensor_op_f64_ls_sm80.cu + trmm_f64n_f64n_f64t_tensor_op_f64_rs_sm80.cu + trmm_f64t_f64t_f64n_tensor_op_f64_ls_sm80.cu + trmm_f64t_f64t_f64n_tensor_op_f64_rs_sm80.cu + trmm_f64n_f64t_f64t_tensor_op_f64_rs_sm80.cu + + # Trmm SM80 f32 tests + trmm_tf32t_tf32n_f32t_tensor_op_f32_ls_sm80.cu + trmm_tf32n_tf32t_f32t_tensor_op_f32_ls_sm80.cu + trmm_tf32n_tf32t_f32t_tensor_op_f32_rs_sm80.cu + trmm_tf32t_tf32n_f32n_tensor_op_f32_ls_sm80.cu + trmm_f32t_f32n_f32t_tensor_op_fast_f32_ls_sm80.cu + trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu + 
trmm_f32n_f32t_f32t_tensor_op_fast_f32_rs_sm80.cu + trmm_f32t_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu + + # Trmm SM80 complex f64 tests + trmm_cf64n_cf64n_cf64t_tensor_op_f64_sm80.cu + trmm_cf64n_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu + + # Trmm SM80 complex f32 tests + trmm_cf32n_cf32n_cf32t_tensor_op_f32_sm80.cu + trmm_cf32n_cf32n_cf32t_tensor_op_fast_f32_sm80.cu + + # Trmm SM90 f64 tests + trmm_f64_f64_f64_tensor_op_f64_sm90.cu + + # Trmm SM90 complex f64 tests + trmm_cf64_cf64_cf64_tensor_op_f64_sm90.cu + + ## SYR2K + # Syr2k SM80 f64 tests + syr2k_f64n_f64t_tensor_op_f64_sm80.cu + syr2k_f64n_f64n_tensor_op_f64_sm80.cu + syr2k_f64t_f64n_tensor_op_f64_sm80.cu + + # Syr2k SM80 f32 tests + syr2k_tf32n_f32n_tensor_op_f32_sm80.cu + syr2k_tf32t_f32n_tensor_op_f32_sm80.cu + syr2k_f32n_f32n_tensor_op_fast_f32_sm80.cu + syr2k_f32t_f32n_tensor_op_fast_f32_sm80.cu + + # Syr2k SM80 complex f64 tests + syr2k_cf64n_cf64t_tensor_op_f64_sm80.cu + syr2k_cf64n_cf64n_tensor_op_f64_sm80.cu + + # Syr2k SM80 complex f32 tests + syr2k_cf32n_cf32n_tensor_op_f32_sm80.cu + syr2k_cf32n_cf32t_tensor_op_f32_sm80.cu + syr2k_cf32n_cf32n_tensor_op_fast_f32_sm80.cu + syr2k_cf32n_cf32t_tensor_op_fast_f32_sm80.cu + + # Syr2k SM90 f64 tests + syr2k_f64_f64_tensor_op_f64_sm90.cu + + # Syr2k SM90 complex f64 tests + syr2k_cf64_cf64_tensor_op_f64_sm90.cu + + ## HER2K + # Her2k SM80 complex f64 tests + her2k_cf64n_cf64n_tensor_op_f64_sm80.cu + + # Her2k SM80 complex f32 tests + her2k_cf32h_cf32n_tensor_op_f32_sm80.cu + her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu + + # Her2k SM90 complex f64 tests + her2k_cf64_cf64_tensor_op_f64_sm90.cu + + ## SYMM + # Symm SM80 f64 tests + symm_f64n_f64n_tensor_op_f64_ls_sm80.cu + symm_f64n_f64n_tensor_op_f64_rs_sm80.cu + symm_f64n_f64t_tensor_op_f64_ls_sm80.cu + symm_f64n_f64t_tensor_op_f64_rs_sm80.cu + symm_f64t_f64n_tensor_op_f64_ls_sm80.cu + symm_f64t_f64n_tensor_op_f64_rs_sm80.cu + symm_f64t_f64t_tensor_op_f64_ls_sm80.cu + 
symm_f64t_f64t_tensor_op_f64_rs_sm80.cu + + # Symm SM80 f32 tests + symm_tf32n_f32n_tensor_op_f32_ls_sm80.cu + symm_tf32n_f32n_tensor_op_f32_rs_sm80.cu + symm_tf32t_f32t_tensor_op_f32_ls_sm80.cu + symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu + symm_f32n_f32n_tensor_op_fast_f32_rs_sm80.cu + symm_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu + + # Symm SM80 complex f64 tests + symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_sm80.cu + symm_cf64n_cf64n_cf64n_tensor_op_rs_f64_sm80.cu + symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu + + # Symm SM80 complex f32 tests + symm_cf32n_cf32n_tensor_op_f32_ls_sm80.cu + symm_cf32n_cf32n_tensor_op_f32_rs_sm80.cu + symm_cf32n_cf32n_tensor_op_fast_f32_ls_sm80.cu + symm_cf32n_cf32n_tensor_op_fast_f32_rs_sm80.cu + + # Symm SM90 f64 tests + symm_f64_f64_tensor_op_f64_sm90.cu + + # Symm SM90 complex f64 tests + symm_cf64_cf64_cf64_tensor_op_f64_sm90.cu + + # Hemm SM80 complex f64 tests + hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_sm80.cu + hemm_cf64h_cf64n_cf64n_tensor_op_rs_f64_sm80.cu + hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu + + # Hemm SM80 complex f32 tests + hemm_cf32h_cf32n_tensor_op_f32_ls_sm80.cu + hemm_cf32h_cf32n_tensor_op_f32_rs_sm80.cu + hemm_cf32h_cf32n_tensor_op_fast_f32_ls_sm80.cu + hemm_cf32h_cf32n_tensor_op_fast_f32_rs_sm80.cu + + # Hemm SM90 complex f64 tests + hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu +) + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_grouped_blas3 + + BATCH_SOURCES ON + BATCH_SIZE 4 + + # Grouped SYR2K SM80 f64 tests + syr2k_f64n_f64n_tensor_op_f64_grouped_sm80.cu + syr2k_f64n_f64t_tensor_op_f64_grouped_sm80.cu + syr2k_f64t_f64n_tensor_op_f64_grouped_sm80.cu + syr2k_f64t_f64t_tensor_op_f64_grouped_sm80.cu + + # Grouped SYR2K SM80 cf64 tests + syr2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu + syr2k_cf64n_cf64t_tensor_op_f64_grouped_sm80.cu + syr2k_cf64t_cf64n_tensor_op_f64_grouped_sm80.cu + syr2k_cf64t_cf64t_tensor_op_f64_grouped_sm80.cu + + # Grouped HER2K SM80 f64 
tests + her2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu + her2k_cf64h_cf64n_tensor_op_f64_grouped_sm80.cu +) + +endif() + +if (NOT CUDA_COMPILER MATCHES "[Cc]lang") + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_device_broadcast + + gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu +) + +endif() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/__pycache__/simt_sm50.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/__pycache__/simt_sm50.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86ecd274c80e2ef90ad302ef3d72acc2a17f2a33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/__pycache__/simt_sm50.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/default_gemm_configuration.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/default_gemm_configuration.hpp new file mode 100644 index 0000000000000000000000000000000000000000..96d7894681d5b9bf4ee66c8c8566319eaabaca62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/default_gemm_configuration.hpp @@ -0,0 +1,1366 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cute/atom/mma_atom.hpp" +#include "cute/atom/copy_atom.hpp" + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/mma.h" +#include "cutlass/layout/layout.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/collective/collective_mma.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" + +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +namespace cutlass { +namespace gemm { +namespace device { +using namespace cute; + +// This type is only intended to demonstrate porting 2.x kernels to 3.0 +template< + class OperatorClass, class ArchTag, + class ElementA, class LayoutA, + class ElementB, class LayoutB, + class ElementC, class LayoutC, + class ElementAccumulator> +struct DefaultGemmConfigurationToCutlass3Types { + static_assert(sizeof(ElementA) == 0, "No valid DefaultGemmConfigurationToCutlass3Types configuration exists."); +}; + +/////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct DefaultGemm_TensorOpSm80_OperandA; + +template +struct DefaultGemm_TensorOpSm80_OperandB; + +// +// F16: 128-by-128-by-64 +// + +/// Operand A - Row-major (K-Major) +template <> +struct DefaultGemm_TensorOpSm80_OperandA +{ + // Smem + using SmemLayoutAtom = decltype( + composition(Swizzle<3,3,3>{}, + Layout, + Stride<_64, _1>>{})); + using SmemCopyAtom = Copy_Atom; + + // Gmem + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, half_t>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); +}; + +/// Operand A - Column-major (M-major) +template +struct DefaultGemm_TensorOpSm80_OperandA +{ + // Smem + using SmemLayoutAtom = decltype( + composition(Swizzle<3,3,3>{}, + Layout, + Stride< _1,_64>>{})); + using SmemCopyAtom 
= Copy_Atom; + + // Gmem + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, half_t>{}, + Layout, + Stride< _1,_16>>{}, + Layout>{})); +}; + +// Because the F32F16 TiledMMA is A-B symmetric, we can reuse the DefaultOperands + +// Operand B - Column-Major (K-major) +template +struct DefaultGemm_TensorOpSm80_OperandB + : DefaultGemm_TensorOpSm80_OperandA +{}; + +// Operand B - Row-Major (N-major) +template +struct DefaultGemm_TensorOpSm80_OperandB + : DefaultGemm_TensorOpSm80_OperandA +{}; + +// +// F16: 128-by-128-by-32 (small k-block) +// + +/// Operand A - Row-major (K-Major) +template <> +struct DefaultGemm_TensorOpSm80_OperandA +{ + // Smem + using SmemLayoutAtom = decltype( + composition(Swizzle<2,3,3>{}, + Layout, + Stride<_32, _1>>{})); + using SmemCopyAtom = Copy_Atom; + + // Gmem + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, half_t>{}, + Layout, + Stride< _4,_1>>{}, + Layout>{})); +}; + +} + +/////////////////////////////////////////////////////////////////////////////// + +// Ampere MMA F32F16 +template +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm80, + half_t, LayoutA, + half_t, LayoutB, + float, LayoutC, + float> +{ + using TileShape = Shape<_128, _128, _32>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, + Layout>, // 2x2x1 thread group + Layout>>; // 1x2x1 value group for 16x16x16 MMA and LDSM + + // A + static constexpr int kAlignmentA = 8; + using DefaultOperandA = detail::DefaultGemm_TensorOpSm80_OperandA< + half_t, LayoutA, kAlignmentA, 32>; + using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; // M, K + using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom; + using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy; + + // B + static constexpr int kAlignmentB = 8; + using DefaultOperandB = detail::DefaultGemm_TensorOpSm80_OperandB< + half_t, LayoutB, kAlignmentB, 32>; + 
using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; // N, K + using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom; + using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy; + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + half_t, TagToStrideA_t, + half_t, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +// +// TF32: 128-by-128-by-kblock (kBlock = 16, 32) +// + +/// Operand A - Row-major (K-major) (kBlock = 32) +template <> +struct DefaultGemm_TensorOpSm80_OperandA +{ + // Smem + using SmemLayoutAtom = decltype( + composition(Swizzle<3,2,3>{}, + Layout, + Stride<_32, _1>>{})); + using SmemCopyAtom = Copy_Atom; + + // Gmem + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, tfloat32_t>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); +}; + +/// Operand A - Row-major (K-major) (kBlock = 16) +template <> +struct DefaultGemm_TensorOpSm80_OperandA +{ + // Smem + using SmemLayoutAtom = decltype( + composition(Swizzle<2,2,3>{}, + Layout, + Stride<_16, _1>>{})); + using SmemCopyAtom = Copy_Atom; + // Gmem + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, tfloat32_t>{}, + Layout, + Stride< _4,_1>>{}, + Layout>{})); +}; + +/// Operand A - Column-major (M-major) +template +struct DefaultGemm_TensorOpSm80_OperandA +{ + // Smem + using SmemLayoutAtom = decltype( + composition(Swizzle<3,2,3>{}, + Layout, + Stride< _1,_32>>{})); + using SmemCopyAtom = Copy_Atom, tfloat32_t>; + // Gmem + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, 
tfloat32_t>{}, + Layout, + Stride< _1,_16>>{}, + Layout>{})); +}; + +// Because the TF32 TiledMMA is A-B symmetric, we can reuse the DefaultOperands + +// Operand B - Column-Major (K-major) +template +struct DefaultGemm_TensorOpSm80_OperandB + : DefaultGemm_TensorOpSm80_OperandA +{}; + +// Operand B - Row-Major (N-major) +template +struct DefaultGemm_TensorOpSm80_OperandB + : DefaultGemm_TensorOpSm80_OperandA +{}; + +} + +/////////////////////////////////////////////////////////////////////////////// + +// Ampere MMA F32TF32 +template +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm80, + tfloat32_t, LayoutA, + tfloat32_t, LayoutB, + float, LayoutC, + float> +{ + using TileShape = Shape<_128, _128, _32>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, + Layout, Stride<_2, _1, _1>>, // 2x2x1 thread group + Layout>>; // 1x2x1 value group for 16x16x8 and LDSM + + // A + static constexpr int kAlignmentA = 4; + using DefaultOperandA = detail::DefaultGemm_TensorOpSm80_OperandA< + tfloat32_t, LayoutA, kAlignmentA, 32>; + using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; // M, K + using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom; + using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy; + + // B + static constexpr int kAlignmentB = 4; + using DefaultOperandB = detail::DefaultGemm_TensorOpSm80_OperandB< + tfloat32_t, LayoutB, kAlignmentB, 32>; + using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; // N, K + using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom; + using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy; + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + tfloat32_t, TagToStrideA_t, + tfloat32_t, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, 
SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// +template +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm80, + int8_t, cutlass::layout::RowMajor, + int8_t, cutlass::layout::ColumnMajor, + int32_t, LayoutC, + int32_t> +{ + using TileShape = Shape<_128, _128, _64>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, + Layout>, // 2x2x1 thread group + Layout>>; // 1x2x1 value group for 16x16x32 and LDSM + + // A (M,K) K-major + using SmemLayoutAtomA = decltype( + composition( + Swizzle<2,4,3>{}, + Layout, + Stride<_64, _1>>{})); + static constexpr int kAlignmentA = 16; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, int8_t>{}, + Layout, + Stride< _4,_1>>{}, + Layout>>{})); + // LDS.32- or LDSM-based copy atom + // using SmemCopyAtomA = Copy_Atom; + using SmemCopyAtomA = Copy_Atom; // LDSM works + + // B (N,K) K-major + using SmemLayoutAtomB = decltype( + composition( + Swizzle<2,4,3>{}, + Layout, + Stride<_64, _1>>{})); + static constexpr int kAlignmentB = 16; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, int8_t>{}, + Layout, + Stride< _4,_1>>{}, + Layout>>{})); + + // LDS.32- or LDSM-based copy atom + // using SmemCopyAtomB = Copy_Atom; + using SmemCopyAtomB = Copy_Atom; // LDSM works + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + int8_t, TagToStrideA_t, + int8_t, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + using CollectiveEpilogue = 
epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// +//////////////////////////// SIMT TWO STAGE /////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct DefaultGemm_Simt_OperandA; + +/////////////////////////////////////////////////////////////////////////////// + +template +struct DefaultGemm_Simt_OperandA +{ + using SmemLayoutAtom = Layout, + Stride< _1,_128>>; + + using SmemCopyAtom = Copy_Atom; + + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, Element>{}, + Layout, + Stride< _1,_32>>{}, + Layout>{})); +}; + +template +struct DefaultGemm_Simt_OperandA +{ + using SmemLayoutAtom = Layout, + Stride< _1,Int<128 + 4>>>; // Padded + + using SmemCopyAtom = Copy_Atom; + + using GmemTiledCopy = decltype( + make_tiled_copy(Copy_Atom, Element>{}, + Layout, + Stride< _8, _1>>{}, + Layout>{})); + +}; + +template +struct DefaultGemm_Simt_OperandB; + +template +struct DefaultGemm_Simt_OperandB + : DefaultGemm_Simt_OperandA {}; + +template +struct DefaultGemm_Simt_OperandB + : DefaultGemm_Simt_OperandA {}; + +} // end namespace detail + +// SIMT Two Stage +template < + class ArchTag, + class ElementA, class LayoutA, + class ElementB, class LayoutB, + class ElementC, class LayoutC, + class ElementAccumulator> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, ArchTag, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementAccumulator> +{ + using TileShape = Shape<_128, _128, _8>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm70TwoStage; + using TiledMma = TiledMMA< + MMA_Atom>, + Layout>>; + + // A + static constexpr int kAlignmentA = 1; + using DefaultOperandA = detail::DefaultGemm_Simt_OperandA; + using 
SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; + using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom; + using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy; + + // B + static constexpr int kAlignmentB = 1; + using DefaultOperandB = detail::DefaultGemm_Simt_OperandB; + using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; + using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom; + using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy; + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + + +// +// DP4A - int8 Proof-of-concept +// + +// SIMT Two Stage TN - idp4a +template < + class ArchTag, + class ElementC, class LayoutC> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, ArchTag, + int8_t, cutlass::layout::RowMajor, + int8_t, cutlass::layout::ColumnMajor, + ElementC, LayoutC, + int32_t> +{ + using TileShape = Shape<_128, _128, _32>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm70TwoStage; + // NOTE: permuting MMA M mode lets us generate 128b smem loads (LDS.128) but has worst case bank conflicts + using TiledMma = TiledMMA< + MMA_Atom, + Layout>>; // Tile of atoms (threads) + + // A (M,K) K-major + using ElementA = int8_t; + // 40% from regular M and N major layout + // using SmemLayoutAtomA = Layout, + // Stride< _1,_128>>; + // 80% from interleaved layouts + using SmemLayoutAtomA = Layout>, + Stride< _4, Stride<_1,_512>>>; + + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 4; + 
using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); + + // B (N,K) K-major + using ElementB = int8_t; + // 40% from regular M and N major layout + // using SmemLayoutAtomB = Layout, + // Stride< _1,_128>>; + // 80% from interleaved layouts + using SmemLayoutAtomB = Layout>, + Stride< _4, Stride<_1,_512>>>; + + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 4; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Two Stage NN - idp4a +template < + class ArchTag, + class ElementC, class LayoutC> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, ArchTag, + int8_t, cutlass::layout::ColumnMajor, + int8_t, cutlass::layout::ColumnMajor, + ElementC, LayoutC, + int32_t> +{ + using TileShape = Shape<_128, _128, _32>; + static constexpr int ThreadCount = 256; + + using DispatchPolicy = MainloopSm70TwoStage; + + using TiledMma = TiledMMA< + MMA_Atom, + Layout>>; + + // A (M,K) M-major + using ElementA = int8_t; + using SmemLayoutAtomA = Layout>, + Stride< _4, Stride<_1,_512>>>; + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 1; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout, + Stride< _1,_32>>{}, + Layout>{})); + + // B (N,K) K-major + using 
ElementB = int8_t; + using SmemLayoutAtomB = Layout>, + Stride< _4, Stride<_1,_512>>>; + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 4; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Two Stage NT - idp4a +template < + class ArchTag, + class ElementC, class LayoutC> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, ArchTag, + int8_t, cutlass::layout::ColumnMajor, + int8_t, cutlass::layout::RowMajor, + ElementC, LayoutC, + int32_t> +{ + using TileShape = Shape<_128, _128, _32>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm70TwoStage; + using TiledMma = TiledMMA< + MMA_Atom, + Layout>>; + + // A (M,K) M-major + using ElementA = int8_t; + using SmemLayoutAtomA = Layout>, + Stride< _4, Stride<_1,_512>>>; + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 1; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout, + Stride< _1,_32>>{}, + Layout>{})); + + // B (N,K) N-major + using ElementB = int8_t; + using SmemLayoutAtomB = Layout>, + Stride< _4, Stride<_1,_512>>>; + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 1; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout, + Stride< _1,_32>>{}, + Layout>{})); + + // 
Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Two Stage TT - idp4a +template < + class ArchTag, + class ElementC, class LayoutC> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, ArchTag, + int8_t, cutlass::layout::RowMajor, + int8_t, cutlass::layout::RowMajor, + ElementC, LayoutC, + int32_t> +{ + using TileShape = Shape<_128, _128, _32>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm70TwoStage; + using TiledMma = TiledMMA< + MMA_Atom, + Layout>>; + + // A (M,K) K-major + using ElementA = int8_t; + using SmemLayoutAtomA = Layout>, + Stride< _4, Stride<_1,_512>>>; + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 4; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); + + // B (N,K) N-major + using ElementB = int8_t; + using SmemLayoutAtomB = Layout>, + Stride< _4, Stride<_1,_512>>>; + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 1; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout, + Stride< _1,_32>>{}, + Layout>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B 
+ >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////// SIMT MULTI STAGE ////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Multi Stage NT +template < + class ElementA, + class ElementB, + class ElementC, class LayoutC, + class ElementAccumulator> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, arch::Sm80, + ElementA, cutlass::layout::ColumnMajor, + ElementB, cutlass::layout::RowMajor, + ElementC, LayoutC, + ElementAccumulator> +{ + using TileShape = Shape<_128, _128, _16>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom>, + Layout>, + Layout>, + Tile,Layout<_2,_16>,Underscore>>; + + // A (M,K) M-major + using SmemLayoutAtomA = Layout>; + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 2; + using AlignmentTypeA = cute::uint_byte_t(sizeof(ElementA)) * kAlignmentA>; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout>{}, + Layout>{})); + + // B (N,K) N-major + using SmemLayoutAtomB = Layout>; + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 2; + using AlignmentTypeB = cute::uint_byte_t(sizeof(ElementB)) * kAlignmentB>; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout>{}, + Layout>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using 
CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Multi Stage TN +template < + class ElementA, + class ElementB, + class ElementC, class LayoutC, + class ElementAccumulator> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, arch::Sm80, + ElementA, cutlass::layout::RowMajor, + ElementB, cutlass::layout::ColumnMajor, + ElementC, LayoutC, + ElementAccumulator> +{ + using TileShape = Shape<_128, _128, _16>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom>, + Layout>>; + + // A (M,K) K-major + using SmemLayoutAtomA = Layout, + Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentA + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 1; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout, + Stride<_16, _1>>{})); + + // B (N,K) K-major + using SmemLayoutAtomB = Layout, + Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentB + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 1; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout, + Stride<_16, _1>>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Multi 
Stage NN +template < + class ElementA, + class ElementB, + class ElementC, class LayoutC, + class ElementAccumulator> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, arch::Sm80, + ElementA, cutlass::layout::ColumnMajor, + ElementB, cutlass::layout::ColumnMajor, + ElementC, LayoutC, + ElementAccumulator> +{ + using TileShape = Shape<_128, _128, _16>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom>, + Layout>, + Layout>, + Tile,Underscore,Underscore>>; + + // A (M,K) M-major + using SmemLayoutAtomA = Layout>; + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 2; + using AlignmentTypeA = cute::uint_byte_t(sizeof(ElementA)) * kAlignmentA>; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout>{}, + Layout>{})); + + // B (N,K) K-major + using SmemLayoutAtomB = Layout, + Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentB + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 1; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout, + Stride<_16, _1>>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// SIMT Multi Stage TT +template < + class ElementA, + class ElementB, + class ElementC, class LayoutC, + class ElementAccumulator> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassSimt, arch::Sm80, + ElementA, 
cutlass::layout::RowMajor, + ElementB, cutlass::layout::RowMajor, + ElementC, LayoutC, + ElementAccumulator> +{ + using TileShape = Shape<_128, _128, _16>; + static constexpr int ThreadCount = 256; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom>, + Layout>, + Layout>, + Tile,Underscore>>; + + // A (M,K) K-major + using SmemLayoutAtomA = Layout, + Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentA + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 1; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, ElementA>{}, + Layout, + Stride<_16, _1>>{})); + + // B (N,K) N-major + using SmemLayoutAtomB = Layout>; + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 2; + using AlignmentTypeB = cute::uint_byte_t(sizeof(ElementB)) * kAlignmentB>; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, ElementB>{}, + Layout>{}, + Layout>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + ElementA, TagToStrideA_t, + ElementB, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// Ampere fp64 MMA TN (K-Major A and K-Major B) +template <> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm80, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double> +{ + using TileShape = Shape<_128, _64, _16>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, // 
Atom + Layout>, // Atom layout + Layout>, // Val layout + Tile,Layout<_2,_16>,Underscore>>; // Mode permutations + + // A (M,K) K-Major + using SmemLayoutAtomA = decltype( + composition(Swizzle<2,0,4>{}, + Layout, + Stride<_1, _4>>{})); // M, K + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 1; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride<_16, _1>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 1x1 doubles + + // B (N,K) K-Major + using SmemLayoutAtomB = decltype( + composition(Swizzle<2,0,4>{}, + Layout, + Stride<_1, _4>>{})); // N, K + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 1; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride<_16, _1>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 1x1 doubles + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + double, TagToStrideA_t, + double, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; + +/* + using EpilogueOutputOp = epilogue::collective::Epilogue< + epilogue::thread::LinearCombination, + Layout, + Stride< _1,_64>>, // SMEM layout + Copy_Atom,double>, // R2S with tiled_mma layout + decltype(make_tiled_copy(Copy_Atom,double>{},// S2R + Layout, + Stride< _1,_16>>{}, // Thread layout + Layout>{})), // Value layout + Copy_Atom,double> // R2G with S2R_dst layout + >; +*/ +}; + +/////////////////////////////////////////////////////////////////////////////// + +// Ampere fp64 MMA NN (M-Major A and K-Major B) +template <> +struct DefaultGemmConfigurationToCutlass3Types< 
+ arch::OpClassTensorOp, arch::Sm80, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double> +{ + using TileShape = Shape<_128, _64, _16>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, // Atom + Layout>, // Atom layout + Layout>, // Val layout + Tile,Layout<_2,_16>,Underscore>>; // Mode permutations + + // A (M,K) M-Major + using SmemLayoutAtomA = decltype( + composition(Swizzle<2,2,2>{}, + Layout, + Stride< _1,_16>>{})); // M, K + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 2; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride< _1,_16>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 2x1 doubles + + // B (N,K) K-Major + using SmemLayoutAtomB = decltype( + composition(Swizzle<2,0,4>{}, + Layout, + Stride<_1, _4>>{}));// N, K + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 1; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride<_16, _1>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 1x1 doubles + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + double, TagToStrideA_t, + double, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// Ampere fp64 MMA NT (M-Major A and N-Major B) +template <> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm80, + 
double, cutlass::layout::ColumnMajor, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double> +{ + using TileShape = Shape<_128, _64, _16>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, // Atom + Layout>, // Atom layout + Layout>, // Val layout + Tile,Layout<_2,_16>,Underscore>>; // Mode permutations + + // A (M,K) M-Major + using SmemLayoutAtomA = decltype( + composition(Swizzle<2,2,2>{}, + Layout, + Stride< _1,_16>>{})); // M, K + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 2; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride< _1,_16>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 2x1 doubles + + // B (N,K) N-Major + using SmemLayoutAtomB = decltype( + composition(Swizzle<2,2,2>{}, + Layout, + Stride< _1,_16>>{})); // N, K + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 2; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride< _1,_16>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 2x1 doubles + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + double, TagToStrideA_t, + double, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// Ampere fp64 MMA TT (K-Major A and N-Major B) +template <> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm80, + double, cutlass::layout::RowMajor, + 
double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double> +{ + using TileShape = Shape<_128, _64, _16>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, // Atom + Layout>, // Atom layout + Layout>, // Val layout + Tile,Layout<_2,_16>,Underscore>>; // Mode permutations + + // A (M,K) K-Major + using SmemLayoutAtomA = decltype( + composition(Swizzle<2,0,4>{}, + Layout, + Stride<_1, _4>>{})); // M, K + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 1; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride<_16, _1>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 1x1 doubles + + // B (N,K) N-Major + using SmemLayoutAtomB = decltype( + composition(Swizzle<2,2,2>{}, + Layout, + Stride< _1,_16>>{})); // N, K + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 2; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, double>{}, // CopyAtom + Layout, + Stride< _1,_16>>{}, // ThrLayout for CopyAtom + Layout>{})); // Value layout: 2x1 doubles + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + double, TagToStrideA_t, + double, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< + TagToStrideC_t, + TagToStrideC_t, + epilogue::thread::LinearCombination, + cutlass::gemm::EpilogueDefault>; +}; + +/////////////////////////////////////////////////////////////////////////////// + +// Hopper fp64 MMA TN +template <> +struct DefaultGemmConfigurationToCutlass3Types< + arch::OpClassTensorOp, arch::Sm90, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double, 
cutlass::layout::ColumnMajor, + double> +{ + using TileShape = Shape<_128, _64, _16>; + static constexpr int ThreadCount = 128; + using DispatchPolicy = MainloopSm80CpAsync<3>; + using TiledMma = TiledMMA< + MMA_Atom, + Layout>>; + + // A (M,K) K-major + using SmemLayoutAtomA = decltype( + make_ordered_layout(Shape<_128,_16>{}, + Step < _2, _1>{})); // M, K + using SmemCopyAtomA = Copy_Atom; + static constexpr int kAlignmentA = 2; + using GmemTiledCopyA = decltype( + make_tiled_copy(Copy_Atom, double>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); + + // B (N,K) K-major + using SmemLayoutAtomB = decltype( + make_ordered_layout(Shape<_64,_16>{}, + Step < _2, _1>{})); // N, K + using SmemCopyAtomB = Copy_Atom; + static constexpr int kAlignmentB = 2; + using GmemTiledCopyB = decltype( + make_tiled_copy(Copy_Atom, double>{}, + Layout, + Stride< _8,_1>>{}, + Layout>{})); + + // Mainloop + using CollectiveMainloop = collective::CollectiveMma< + DispatchPolicy, TileShape, + double, TagToStrideA_t, + double, TagToStrideB_t, + TiledMma, + GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A + GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B + >; + + // Epilogue + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + double, double, + double, cutlass::layout::ColumnMajor, 1, + double, cutlass::layout::ColumnMajor, 1, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..3f9b31d4e4803e42e587bddaeb525551166aa332 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu @@ -0,0 +1,232 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x256x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, 
ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 256x128x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x128x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + 
+TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x256x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 256x64x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 64, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x128x512_32x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x64x512_64x32x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x64x512_32x32x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..905d03a8b4ad740dacdf8a699c2d1e3001a57070 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu @@ -0,0 +1,704 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_B1_AND_SM80_ENABLED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x256x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 256x128x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3,128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x128x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 256x64x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x256x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x128x1024_32x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 1024>, + cutlass::gemm::GemmShape<32, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + 
+TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x64x1024_64x32x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 1024>, + cutlass::gemm::GemmShape<64, 32, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x64x1024_32x32x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<32, 32, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x256x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 256x128x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x128x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 256x64x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x256x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x128x512_32x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 128x64x512_64x32x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + 
+TEST(SM80_Device_Gemm_b1t_b1n_s32n_tensor_op_s32, 64x64x512_32x32x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6, 128, 128, + false, cutlass::arch::OpAndPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +#endif // defined(CUTLASS_ARCH_MMA_B1_AND_SM80_ENABLED) + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED) + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 128x256x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + 
+TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 256x128x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 128x128x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 256x64x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 64x256x1024_64x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 64x128x1024_32x64x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 1024>, + cutlass::gemm::GemmShape<32, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 128x64x1024_64x32x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 1024>, + cutlass::gemm::GemmShape<64, 32, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 64x64x1024_32x32x1024) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<32, 32, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 128x256x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 256x128x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 128x128x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 256x64x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 64x256x512_64x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using 
ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 64x128x512_32x64x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 128x64x512_64x32x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + 
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_XOR_b1t_b1n_s32n_tensor_op_s32, 64x64x512_32x32x512) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +#endif // defined(CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED) + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_wmma_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_wmma_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..b804d94f31c4afb27ce5fc5a779182dba5d38f80 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32n_wmma_tensor_op_s32_sm75.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" +///////////////////////////////////////////////////////////////////////////////////////////////// +////// WMMA Instruction Shape = 8x8x128, DataType/Instruction = b1 ^ b1 + s32 => s32 ///////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_wmma_tensor_op_s32, 128x256x512_64x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_wmma_tensor_op_s32, 256x128x512_64x64x512_8x8x128) { + + using 
ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_wmma_tensor_op_s32, 128x128x512_64x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_wmma_tensor_op_s32, 64x128x512_32x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_wmma_tensor_op_s32, 128x64x512_64x32x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32n_wmma_tensor_op_s32, 64x64x512_32x32x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + 
ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..bfe281fb31d2a423bd6a9780c493201b7a846ea7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_tensor_op_s32_sm75.cu @@ -0,0 +1,230 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 128x256x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 256x128x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 128x128x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 64x256x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 256x64x512_64x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 64, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 64x128x512_32x64x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + 
+TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 128x64x512_64x32x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_tensor_op_s32, 64x64x512_32x32x512) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_tensor_op_s32_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..895d3bacf101ec79b8815e52e080e98978329bc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_tensor_op_s32_sm80.cu @@ -0,0 +1,378 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 128x256x1024_64x64x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, 
ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 256x128x1024_64x64x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 128x128x1024_64x64x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 256x64x1024_64x64x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 64x256x1024_64x64x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 1024>, + cutlass::gemm::GemmShape<64, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 64x128x1024_32x64x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 1024>, + cutlass::gemm::GemmShape<32, 64, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 128x64x1024_64x32x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 1024>, + cutlass::gemm::GemmShape<64, 32, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 64x64x1024_32x32x1024, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 1024>, + cutlass::gemm::GemmShape<32, 32, 1024>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 128x256x512_64x64x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 256x128x512_64x64x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 128x128x512_64x64x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 256x64x512_64x64x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + 
false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 64x256x512_64x64x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 64x128x512_32x64x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 128x64x512_64x32x512, { + using ElementOutput = int32_t; + 
using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_XOR_b1t_b1n_s32t_tensor_op_s32, 64x64x512_32x32x512, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, cutlass::layout::RowMajor, cutlass::uint1b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, cutlass::gemm::GemmShape<16, 8, 256>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6, 128, 128, + false, cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_wmma_tensor_op_s32_sm75.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_wmma_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..505405e8b10b835110dea73d895d4a8732ab8db8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_b1t_b1n_s32t_wmma_tensor_op_s32_sm75.cu @@ -0,0 +1,242 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" +///////////////////////////////////////////////////////////////////////////////////////////////// +////// WMMA Instruction Shape = 8x8x128, DataType/Instruction = b1 ^ b1 + s32 => s32 ///////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_wmma_tensor_op_s32, 128x256x512_64x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<128, 256, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_wmma_tensor_op_s32, 256x128x512_64x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_wmma_tensor_op_s32, 128x128x512_64x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 512>, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_wmma_tensor_op_s32, 64x128x512_32x64x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 512>, + cutlass::gemm::GemmShape<32, 64, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_wmma_tensor_op_s32, 128x64x512_64x32x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 512>, + cutlass::gemm::GemmShape<64, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_b1t_b1n_s32t_wmma_tensor_op_s32, 64x64x512_32x32x512_8x8x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::uint1b_t, + cutlass::layout::RowMajor, + cutlass::uint1b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 512>, + cutlass::gemm::GemmShape<32, 32, 512>, + cutlass::gemm::GemmShape<8, 8, 128>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, 128, 128, false, + cutlass::arch::OpXorPopc>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} +#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_bf16n_bf16n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_bf16n_bf16n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..a25d8aaf86daf0df8fe03505964717321237fac8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_bf16n_bf16n_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,359 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, 
cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 256x64x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + 
cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 64x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 
128x64x32_64x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, + cutlass::bfloat16_t, cutlass::layout::ColumnMajor, ElementOutput, + cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_bf16t_bf16t_bf16t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_bf16t_bf16t_bf16t_tensor_op_f32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..3dfd4f15c6b24c49ec862440691a2e1454403737 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_bf16t_bf16t_bf16t_tensor_op_f32_sm80.cu @@ -0,0 +1,343 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, 
ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 128x256x32_64x64x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, 
ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 256x128x32_64x64x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 128x128x32_64x64x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 256x64x32_64x64x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 64x256x32_64x64x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 64x128x32_32x64x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 128x64x32_64x32x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32, 64x64x32_32x32x32) { + using ElementOutput = cutlass::bfloat16_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::bfloat16_t, cutlass::layout::RowMajor, cutlass::bfloat16_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if 
defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..00c64b7dc5d5b99319cf2c10d3f4066661fa439b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu @@ -0,0 +1,259 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" + + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Operands data type: complex +// Rounding: float -> tfloat32_t (half_ulp_truncate) +// Instruction operand data type: tfloat32_t (real part) and tfloat32_t (imaginary part) +// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +// Instruction output/accumulation data type: f32 (real part) and f32 (imaginary part) +// Output data type: complex +///////////////////////////////////////////////////////////////////////////////////////////////// + + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32, 32x32x16_16x16x16) { + + using Element = 
cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32, 64x64x16_16x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32, 64x64x16_32x32x16) { + + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 
8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32, 128x64x16_64x32x16) { + + using Element = cutlass::complex;; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32, 64x128x16_32x64x16) { + + using Element = cutlass::complex;; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32, 128x128x16_32x64x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..146e2ecacb094a38472c257049b197524afd35d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" + + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Operands data type: complex +// Rounding: float -> tfloat32_t (round to nearest) +// Instruction operand data type: tfloat32_t (real part) and tfloat32_t (imaginary part) +// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +// Instruction output/accumulation data type: f32 (real part) and f32 (imaginary part) +// Output data type: complex +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32, 32x32x16_16x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32, 64x64x16_16x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32, 64x64x16_32x32x16) { + + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32, 128x64x16_64x32x16) { + + using Element = cutlass::complex;; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + 
cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32, 64x128x16_32x64x16) { + + using Element = cutlass::complex;; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_tensor_op_tf32_f32, 128x128x16_32x64x16) { + + using Element = cutlass::complex;; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 
+ >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..c30e23087f6d39f5deabcf0b13f41bbba0801573 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm90.cu @@ -0,0 +1,251 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface with Hopper FP64 +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" + + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64n_cf64t_cf64t_tensor_op_f64, 32x32x16_16x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + 
Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64n_cf64t_cf64t_tensor_op_f64, 32x32x8_16x16x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 16, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64n_cf64t_cf64t_tensor_op_f64, 64x64x16_16x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64n_cf64t_cf64t_tensor_op_f64, 64x64x8_16x32x8) { + + using Element = cutlass::complex; + + using Gemm = 
cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<16, 32, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64n_cf64t_cf64t_tensor_op_f64, 64x64x16_32x32x16) { + + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64n_cf64t_cf64t_tensor_op_f64, 64x64x8_32x32x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< 
+ Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..60ec7a8ac2c2b540538b4d1e31bcabf9e30e6ee5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu @@ -0,0 +1,197 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" + + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 32x32x8_16x16x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 16, 8>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 64x64x8_32x16x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 16, 8>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + 
Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 64x64x16_32x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..c4deb60bc9f2ec51c54fc875e44acdf57fe1591a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian_sm90.cu @@ -0,0 +1,196 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with Hopper FP64 +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" + + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 32x32x8_16x16x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 16, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 64x64x8_32x16x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + 
cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 16, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64_gaussian, 64x64x16_32x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..233e58d1f448ec245bed619a7391f49eed7b4b97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_cf64t_cf64n_cf64t_tensor_op_f64_sm90.cu @@ -0,0 +1,303 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with Hopper FP64 +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" + + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 32x32x8_16x16x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 16, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 64x64x8_32x32x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + 
cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 64x128x8_32x32x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 128, 8>, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 128x64x8_32x32x8) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 128, 8>, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 32x32x16_16x16x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + 
cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 64x64x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 64x128x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM90_Device_Gemm_cf64t_cf64n_cf64t_tensor_op_f64, 128x64x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = 
cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_direct_store_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_direct_store_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..5ebd2e4be2b572094918cbcfc44ea3fdeec66cc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_direct_store_tensor_op_f32_sm80.cu @@ -0,0 +1,114 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/kernel/gemm_universal.h" +#include "cutlass/gemm/device/gemm_universal.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_universal.h" + +//////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/epilogue/threadblock/epilogue_direct_store.h" +#include "cutlass/epilogue/threadblock/default_epilogue_direct_store.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_DirectStore_f16n_f16t_f32n_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + // Define the GEMM kernel + using GemmBase = cutlass::gemm::device::GemmUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 4, // This is the vector size of the epilogue. 
+ ElementAccumulator, + ElementAccumulator>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 8, + 8 + >; + + // Define the direct store epilogue + using EpilogueDirectStore = typename cutlass::epilogue::threadblock::DefaultEpilogueDirectStore< + typename GemmBase::GemmKernel::Epilogue + >::Epilogue; + + // Define a new kernel + using Kernel = cutlass::gemm::kernel::GemmUniversal< + typename GemmBase::GemmKernel::Mma, + EpilogueDirectStore, + typename GemmBase::GemmKernel::ThreadblockSwizzle + >; + + // Define the adaptor + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..4fc49a0fffadce66404dfd2a29d5e2426562f3a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,157 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16n_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..f4912eef152d9a053d4d565418e424a5ba87b007 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,154 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16n_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16n_wmma_tensor_op_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16n_wmma_tensor_op_f32, 64x64x32_64x64x32_8x32x16) { + + using 
ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..2808f9ddc136f5851528e61c96f0a636bc65e6ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm75.cu @@ -0,0 +1,307 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x32_64x64x32_brief) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32> + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x32_64x64x32_brief) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + 
cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x32_32x64x32_brief) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + 
cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..274d41fa6a693641295603f5b96aa35d7f339f46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm80.cu @@ -0,0 +1,344 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 
16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x128x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x64x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 
8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x256x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x32_32x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x64x32_64x32x32) { + using ElementOutput = cutlass::half_t; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x64x32_32x32x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..6872a4168da3aa9e43068e7f9e87ddb4294b5d67 
--- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,291 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/gemm/device/gemm_sparse_row_broadcast.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, 
+ ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, 
cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x128_64x64x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x64x128_64x64x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x64x128_64x32x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x64x128_32x32x128) { 
+ using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_Row_Broadcast_f16n_f16n_f16t_tensor_op_f32, 64x64x128_32x32x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemmRowBroadcast< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm(true)); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_volta_tensor_op_f32_sm70.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_volta_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..9bdf56def593933be122bc338fa6f8f81f998e49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_volta_tensor_op_f32_sm70.cu @@ -0,0 +1,274 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 64x64x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_volta_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..411e95b4e931bc4d54e74009172a947a6b679d82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,404 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 64x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 128x64x32_64x64x32_16x16x16) { + // single cta, two warps vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, 
+ ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 64x128x32_32x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 64x64x32_32x32x32_16x16x16) { + + using 
ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..c99f75a4758f0cf451f5bb8872aca1134848fc4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,403 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 64x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + 
cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 128x64x32_64x64x32_16x16x16) { + // single cta, two warps vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + 
cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 64x128x32_32x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using 
Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..6bf8ae59e2bef8c0a4db8c82b9ed0bd5e3da7c59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_tensor_op_f32_sm75.cu @@ -0,0 +1,307 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x256x32_64x64x32_brief) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32> + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x128x32_64x64x32_brief) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + 
ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x128x32_32x64x32_brief) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..5c398c33b8b68a9133322251b9872c6f958272ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_tensor_op_f32_sm80.cu @@ -0,0 +1,343 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + 
using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 256x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + 
using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 256x64x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x128x32_32x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 128x64x32_64x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + 
using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32, 64x64x32_32x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..4ff878b769f4d3f60d9dee737c0dfb3497879f7f --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,159 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16n_f32n_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = 
F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..a123d299224535f851ffa035f7dfd118810b55c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sm75.cu @@ -0,0 +1,307 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x256x32_64x64x32_brief) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + 
cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x128x32_64x64x32_brief) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x128x32_32x64x32_brief) { + 
+ using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4811c9dbbcf4fbf0eed1cc8f9ceb4afb5ce300a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,346 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x256x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x128x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x128x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x64x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + 
cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x256x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x128x64_32x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, 
ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x64x64_64x32x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x64x64_32x32x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x256x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, 
cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x128x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x128x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x64x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x256x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x128x32_32x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, 
cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x64x32_64x32x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x64x32_32x32x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / 
cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4c7ce791160ded9282daae8992013d2e16afb765 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,273 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + 
EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, 
cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x128x128_64x64x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 256x64x128_64x64x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 128x64x128_64x32x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, 
cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16n_f32t_tensor_op_f32, 64x64x128_32x32x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_volta_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_volta_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..bba2b6d810b558216bb84e5f52682605626c6e32 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_volta_tensor_op_f32_sm70.cu @@ -0,0 +1,274 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 
2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 64x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_volta_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + 
cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..350d7e940d859aa323c188fa29ec0856cd61caa9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f32t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + 
cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 64x128x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, 
DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16n_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16n_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16n_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..eff38fdf369f71933702177b41aa52299be6da2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16n_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,157 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16t_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..7392cf9e51bec237035cdd517a26cd95fef26c14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm75.cu @@ -0,0 +1,88 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16_sliced_k, 64x64x64_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM75_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..468e6988d4b25988c0ad2c8f56c2284811b7d6d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_slicedk_sm80.cu @@ -0,0 +1,88 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16_sliced_k, 128x64x64_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..5fd2fb7d1e2cc96ad22205080a016917d67764aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x256x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 
64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x64x32_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x64x32_32x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..90fd6d02229397fe68abbd0dc4a59b33e57b0804 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x64x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, 
cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64> , + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x128x64_32x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x64x64_64x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x64x64_32x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x256x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x128x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x128x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x64x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x256x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x128x32_32x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + 
using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x64x32_64x32x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x64x32_32x32x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..ebe3acb327c39fbaab0757fe73375da169373244 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sparse_sm80.cu @@ -0,0 +1,271 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x64x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64> , + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x128x64_32x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = 
cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x64x64_64x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x64x64_32x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, 
cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x128x128_64x64x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 256x64x128_64x64x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + 
+TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 128x64x128_64x32x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f16t_tensor_op_f16, 64x64x128_32x32x128) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..969f54b7ae3bb8f69537ffce69581db561d90857 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu @@ -0,0 +1,267 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 128x256x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 256x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using 
ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 128x64x32_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + 
cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 64x64x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f16t_volta_tensor_op_f16, 64x64x32_32x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b15b0d53d8d1432a569bcf49e90838bdf56f0b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f16t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,87 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32n_wmma_tensor_op_f32_sm70.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..fe7e1b0c207fe624d4d41042a82016c00e283b97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,159 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16t_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 
32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..3b3d2938d2eb2564fe44b3221fe5f700677e03b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sm75.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 
8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..7387b99239b49c7997ece6c3a1d5f1a7eab7c2c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,384 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" +#include "testbed_universal.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x256x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x128x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x128x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x128x64_64x64x64_sk, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmUniversal< + cutlass::half_t, cutlass::layout::ColumnMajor, + cutlass::half_t, cutlass::layout::RowMajor, + ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::ThreadblockSwizzleStreamK, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); 
+} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32n_tensor_op_f32, 128x128x64_64x64x64_sk, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmUniversal< + cutlass::half_t, cutlass::layout::ColumnMajor, + cutlass::half_t, cutlass::layout::RowMajor, + ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::ThreadblockSwizzleStreamK, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x64x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x256x64_64x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x128x64_32x64x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x64x64_64x32x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x64x64_32x32x64, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x256x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x128x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + 
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x128x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x64x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 
4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x256x32_64x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x128x32_32x64x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x64x32_64x32x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + 
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x64x32_32x32x32, { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_MMA_SM80_SUPPORTED + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..60a476021fa94287d83460312da04e19e2d37161 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,272 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if (CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + 
using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, 
cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x128x128_64x64x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 
256x64x128_64x64x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 128x64x128_64x32x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16n_f16t_f32t_tensor_op_f32, 64x64x128_32x32x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_volta_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_volta_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..02c4ddf00e2958152fcd4e87d72819b6be1e7b11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_volta_tensor_op_f32_sm70.cu @@ -0,0 +1,267 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} 
+ +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 64x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_volta_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ENABLE_TENSOR_CORE_MMA) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..ef37420f675649d8ff0b699b4a15ff67d58d5ed7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16t_f32t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 64x128x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// 
+TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16n_f16t_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16n_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16n_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..9156d8e369b3b70860a56b2282a916b883fdc89c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16n_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,157 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16t_f16n_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16n_wmma_tensor_op_f16, 128x128x32_64x64x32_8x32x16) { + + 
using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16n_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..8bfed3f97b22a65ab082dad4fa9eb2eba64ce737 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,155 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16n_wmma_tensor_op_f32, 128x128x32_64x64x16_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16n_wmma_tensor_op_f32, 64x64x32_64x64x16_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16n_wmma_tensor_op_f32, 64x64x32_64x64x16_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..9e5973ab254df67e928bf5c800c9ebc22de23699 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,321 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, 
+ cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 64x128x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< 
+ ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 64x64x64_32x32x64_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 128x128x64_64x32x64_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 128x128x32_64x32x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_singlestage_wmma_tensor_op_f16, 128x128x32_64x32x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..71121d141ff09398dbc9e7252a646c0c5f044550 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu @@ -0,0 +1,439 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for GEMM + broadcast interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" + +#include "cutlass/gemm/kernel/default_gemm_with_broadcast.h" +#include "cutlass/gemm/device/gemm_universal.h" +#include "cutlass/gemm/device/gemm_universal_with_broadcast.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "cutlass/epilogue/thread/activation.h" +#include "cutlass/epilogue/thread/linear_combination_bias_relu.h" +#include "cutlass/epilogue/thread/linear_combination_residual_block.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_elementwise.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" + +template +struct TestbedUtils { + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; // Input A + cutlass::HostTensor tensor_B; // Input B + cutlass::HostTensor tensor_C; // Input C + cutlass::HostTensor tensor_D1; // Input D + cutlass::HostTensor tensor_D2; // Input D + cutlass::HostTensor tensor_Y1; // Input Y + cutlass::HostTensor tensor_Y2; // Input Y + cutlass::HostTensor tensor_Y_ref; + + // + // Methods + // + + TestbedUtils( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + 
cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::AllZeros) { + cutlass::reference::host::TensorFill(view); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the GEMM workspace + // + + tensor_A.resize(problem_size.mk()); + tensor_B.resize(problem_size.kn()); + tensor_C.resize({1, problem_size.n()}); + tensor_D1.resize(problem_size.mn()); + tensor_D2.resize(problem_size.mn()); + tensor_Y1.resize(problem_size.mn()); + tensor_Y2.resize(problem_size.mn()); + tensor_Y_ref.resize(problem_size.mn()); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + + // Initialize D data to smaller data range. 
This helps avoid large roundoff errors. + int d_scope_min = -2; + int d_scope_max = 2; + cutlass::reference::host::TensorFillRandomUniform(tensor_D1.host_view(), seed + 2016, d_scope_max, d_scope_min, 0); + cutlass::reference::host::TensorFillRandomUniform(tensor_D2.host_view(), seed + 2015, d_scope_max, d_scope_min, 0); + + EXPECT_TRUE(initialize_tensor(tensor_Y1.host_view(), cutlass::Distribution::AllZeros, 0)); + EXPECT_TRUE(initialize_tensor(tensor_Y2.host_view(), cutlass::Distribution::AllZeros, 0)); + EXPECT_TRUE(initialize_tensor(tensor_Y_ref.host_view(), cutlass::Distribution::AllZeros, 0)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. + tensor_A.host_view().at({0, 0}) = GemmElement(1); + tensor_B.host_view().at({0, 0}) = GemmElement(1); + tensor_C.host_view().at({0, 0}) = GemmElement(1); + tensor_D1.host_view().at({0, 0}) = GemmElement(1); + tensor_D2.host_view().at({0, 0}) = GemmElement(1); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D1.sync_device(); + tensor_D2.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, cutlass::HostTensor& tensor_Y_ref, cutlass::HostTensor& tensor_Y) { + + tensor_Y_ref.sync_host(); + tensor_Y.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D2.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Y_ref.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Y.host_view()), 0); + + bool passed = true; + float 
norm_diff = 0; + + norm_diff = cutlass::reference::host::TensorNormDiff(tensor_Y_ref.host_view(), tensor_Y.host_view(), float()); + passed = (norm_diff <= 0.1f); + EXPECT_LT(norm_diff, 0.1f) << " tensor_Y is incorrect"; + + + if (!passed) { + std::ofstream file("errors_testbed_gemm_broadcast_new.txt"); + + + file + << "problem: " << problem_size << "\n\n"; + + file + << "capacity: \n" + << "A: " << tensor_A.capacity() + << "\nB: " << tensor_B.capacity() + << "\nC: " << tensor_C.capacity() + << "\nD1: " << tensor_D1.capacity() + << "\nD2: " << tensor_D2.capacity() + << "\nY: " << tensor_Y.capacity() + << "\n\n" + << "\nY_ref: " << tensor_Y_ref.capacity() + << "\n\n"; + file + << "A =\n" << tensor_A.host_view() + << "\n\nB =\n" << tensor_B.host_view() + << "\n\nC =\n" << tensor_C.host_view() + << "\n\nD1 =\n" << tensor_D1.host_view() + << "\n\nD2 =\n" << tensor_D2.host_view() + << "\n\nY =\n" << tensor_Y.host_view() + << "\n\nY_ref =\n" << tensor_Y_ref.host_view(); + } + + return passed; + } +}; + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +TEST(SM80_Device_GemmWithBroadcast_f16t_f16n_f16t_tensor_op_f16, 128x128_32x3_64x64x32_16x8x16) { + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using OpClass = cutlass::arch::OpClassTensorOp; + using ArchTag = cutlass::arch::Sm80; + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle; + const int kStages = 3; + + const int batch_count = 1; + const cutlass::half_t alpha(1); + const cutlass::half_t beta(1); + + const int M = 1024; + const int K = 
10240; + const int N = 512; + cutlass::gemm::GemmCoord problem{M, N, K}; + + const int batch_stride_A = 0; + const int batch_stride_B = 0; + const int batch_stride_C1 = 0; + const int batch_stride_C2 = 0; + const int batch_stride_D = 0; + const int batch_stride_Vector = 0; + const int batch_stride_Tensor = 0; + + const int64_t lda = LayoutA::packed({problem.m(), problem.k()}).stride(0); + const int64_t ldb = LayoutB::packed({problem.k(), problem.n()}).stride(0); + const int64_t ldc1 = LayoutC::packed({problem.m(), problem.n()}).stride(0); + const int64_t ldc2 = LayoutC::packed({problem.m(), problem.n()}).stride(0); + const int64_t ldd = LayoutC::packed({problem.m(), problem.n()}).stride(0); + const int64_t ldv = 0; + const int64_t ldt = 0; + + TestbedUtils utils; + utils.initialize(problem); + + // + // Create reference Gemm + // + using GemmRef = cutlass::gemm::device::GemmUniversal< + ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, + OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + ThreadblockSwizzle, kStages>; + + typename GemmRef::Arguments args_ref{ + cutlass::gemm::GemmUniversalMode::kGemm, + problem, + batch_count, + {alpha, beta}, + utils.tensor_A.device_data(), + utils.tensor_B.device_data(), + utils.tensor_C.device_data(), + utils.tensor_Y_ref.device_data(), + batch_stride_A, + batch_stride_B, + batch_stride_C1, + batch_stride_D, + lda, + ldb, + ldv, + ldd, + }; + + GemmRef gemm_op_ref; + size_t workspace_size_ref = GemmRef::get_workspace_size(args_ref); + cutlass::device_memory::allocation workspace_ref(workspace_size_ref); + cutlass::Status status = gemm_op_ref.initialize(args_ref, workspace_ref.get()); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status); + + status = gemm_op_ref(); + EXPECT_TRUE(status == 
cutlass::Status::kSuccess) << cutlassGetStatusString(status); + + // + // Create GemmWithBroadcast from single source + // + using GemmSingle = cutlass::gemm::device::GemmUniversalWithBroadcast< + ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, + OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, + cutlass::epilogue::thread::LinearCombinationResidualBlock< + ElementOutput, ElementAccumulator, ElementAccumulator, + ElementAccumulator, 128 / cutlass::sizeof_bits::value, + cutlass::epilogue::thread::Identity, cutlass::multiplies, cutlass::epilogue::thread::Identity>, + ThreadblockSwizzle, kStages>; + + typename GemmSingle::Arguments args_single{ + cutlass::gemm::GemmUniversalMode::kGemm, + problem, + batch_count, + {alpha, beta}, + utils.tensor_A.device_data(), + utils.tensor_B.device_data(), + utils.tensor_D1.device_data(), + utils.tensor_Y1.device_data(), + utils.tensor_C.device_data(), + /* ptr_Tensor = */ nullptr, + batch_stride_A, + batch_stride_B, + batch_stride_C1, + batch_stride_D, + batch_stride_Vector, + batch_stride_Tensor, + lda, + ldb, + ldc1, + ldd, + ldv, + ldt + }; + + GemmSingle gemm_op_single; + size_t workspace_size_single = GemmSingle::get_workspace_size(args_single); + cutlass::device_memory::allocation workspace_single(workspace_size_single); + status = gemm_op_single.initialize(args_single, workspace_single.get()); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status); + + status = gemm_op_single(); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status); + + // Compute the broadcast on the reference previously computed and compare results + utils.tensor_Y_ref.sync_host(); + cutlass::reference::host::TensorMul(utils.tensor_Y_ref.host_view(), utils.tensor_D1.host_view()); + utils.tensor_Y_ref.sync_device(); + utils.compare_reference(problem, utils.tensor_Y_ref, utils.tensor_Y1); + + // + // Create GemmWithBroadcast from two sources + // + 
using GemmDouble = cutlass::gemm::device::GemmUniversalWithBroadcast< + ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, + OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, + cutlass::epilogue::thread::LinearCombinationResidualBlock< + ElementOutput, ElementAccumulator, ElementAccumulator, + ElementAccumulator, 128 / cutlass::sizeof_bits::value, + cutlass::epilogue::thread::Identity, cutlass::multiplies, cutlass::epilogue::thread::Identity, cutlass::plus>, + ThreadblockSwizzle, kStages>; + + typename GemmDouble::Arguments args_double{ + cutlass::gemm::GemmUniversalMode::kGemm, + problem, + batch_count, + {alpha, beta}, + utils.tensor_A.device_data(), + utils.tensor_B.device_data(), + utils.tensor_D1.device_data(), + utils.tensor_D2.device_data(), + utils.tensor_Y2.device_data(), + utils.tensor_C.device_data(), + /* ptr_Tensor = */ nullptr, + batch_stride_A, + batch_stride_B, + batch_stride_C1, + batch_stride_C2, + batch_stride_D, + batch_stride_Vector, + batch_stride_Tensor, + lda, + ldb, + ldc1, + ldc2, + ldd, + ldv, + ldt + }; + + GemmDouble gemm_op_double; + size_t workspace_size_double = GemmDouble::get_workspace_size(args_double); + cutlass::device_memory::allocation workspace_double(workspace_size_double); + status = gemm_op_double.initialize(args_double, workspace_double.get()); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status); + + status = gemm_op_double(); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status); + + // Compute the broadcast on the reference previously computed and compare results + utils.tensor_Y_ref.sync_host(); + cutlass::reference::host::TensorAdd(utils.tensor_Y_ref.host_view(), utils.tensor_D2.host_view()); + utils.tensor_Y_ref.sync_device(); + utils.compare_reference(problem, utils.tensor_Y_ref, utils.tensor_Y2); +} + +#endif diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..08819642fb7ac4926a75d5a0bf8267b6e380e2a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm80.cu @@ -0,0 +1,89 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface + +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16_sliced_k, 128x64x64_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..c3b3094e45e0081316c9c79fb2d9de35ad6673c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_sm75.cu @@ -0,0 +1,242 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x256x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 256x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x64x32_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x64x32_32x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..0343f0b090086007b6642afbb4d5ae8683e03cf5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_sm80.cu @@ -0,0 +1,345 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 256x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x128x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 256x64x64_64x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x256x64_64x64x64) { + using ElementOutput = cutlass::half_t; + 
using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x128x64_32x64x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x64x64_64x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, 
cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x64x64_32x32x64) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x256x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 256x128x32_64x64x32) { + 
using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x128x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 256x64x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, 
+ cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x256x32_64x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x128x32_32x64x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 128x64x32_64x32x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16, 64x64x32_32x32x32) { + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_MMA_SM80_SUPPORTED + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_volta_tensor_op_f16_sm70.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_volta_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..f9861135c6bfaf9a41124a830e4912331d9b096b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_volta_tensor_op_f16_sm70.cu @@ -0,0 +1,274 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 128x256x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 256x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 128x64x32_64x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = 
cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 64x64x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 
32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_volta_tensor_op_f16, 64x64x32_32x32x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ENABLE_TENSOR_CORE_MMA) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..be966e20defb4fef2b210c7fa09124861ab6a47b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,405 @@ +/*************************************************************************************************** + * 
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 64x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 128x64x32_64x64x32_16x16x16) { + // single cta, two warps vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 64x128x32_32x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = 
cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA 
Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..afd4fbfb2e2add4de59654fd81aa5ef99acc7e05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,402 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 64x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 128x64x32_64x64x32_16x16x16) { + // single cta, two warps vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 64x128x32_32x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = 
cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA 
Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32n_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..5f6b77d50736bc65d376e594cd4a7dc86f9a3ea6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,158 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 
+ 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..fab5576ee3b7a4a9f459690dca60f6490a36615f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,226 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + static const int kStages = 1; + + using 
Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32, 64x128x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32, 128x128x32_64x32x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// 
+TEST(SM70_Device_Gemm_f16t_f16n_f32t_singlestage_wmma_tensor_op_f32, 128x128x32_64x32x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + static const int kStages = 1; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..5aa81be7530fd862453a67a9116eb6ac040fc8f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sm75.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 
8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..708b4dfbec82100c77212a450745403afccb9555 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + 
cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x256x64_64x64x64) { + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 
8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + 
using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x64x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, 
cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_MMA_SM80_SUPPORTED + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..e1d938150707573d0a5cefcd7144ef8bffa60e38 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,271 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); 
+} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x128x128_64x64x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + 
+ EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 256x64x128_64x64x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 128x64x128_64x32x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16n_f32t_tensor_op_f32, 64x64x128_32x32x128) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::ColumnMajor, ElementOutput, 
cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..70298cf8b3e1024f7e6feea150c9080e921a13f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f32t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + 
cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< 
+ ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 64x128x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 
8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16n_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..6fcdcee37d49c4c64852beb1a67e320f10d246f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,155 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16n_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16n_wmma_tensor_op_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16n_wmma_tensor_op_f32, 64x64x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16t_wmma_tensor_op_f16_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16t_wmma_tensor_op_f16_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..71fea9298b7d11de274381adf64c216aca46619a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16t_wmma_tensor_op_f16_sm70.cu @@ -0,0 +1,405 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + 
cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 64x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 128x64x32_64x64x32_16x16x16) { + // single cta, two warps vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 
256x128x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 64x128x32_32x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + 
cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + 
cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f16, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 
0000000000000000000000000000000000000000..fc980ac8ada12b632573cf2607d8716972d24252 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f16t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,403 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 
32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 64x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 128x64x32_64x64x32_16x16x16) { + // single cta, two warps vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + // single cta, two warps horizontally two waprs vertically + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = 
cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 64x128x32_32x64x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + 
cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + 
cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F16=>F16 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f16t_wmma_tensor_op_f32, 64x64x32_64x64x32_8x32x16) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..9612d7607831ca98a89adcfb89c48608ca3155d0 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_tensor_op_f32_sm75.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_tensor_op_f32_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..f89b0764cd59b259d4ed30d046743d9def0bde7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_tensor_op_f32_sm80.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, 
cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, 
cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 256x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x128x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 256x64x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x256x32_64x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x128x32_32x64x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, 
cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 128x64x32_64x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32, 64x64x32_32x32x32) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..6a32a33305afec8597e5f3ca959c59a0bd53f50b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32n_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,156 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// 
+///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f32n_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..d21ee877357ab9003d22a5147508ed0b4b366590 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_tensor_op_f32_sm75.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_f16t_f16t_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, 
+ cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_f16t_f16t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..3bafd4dc486c2356ace55d0032addca4b9860f7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,199 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 128x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 256x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 128x128x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 256x64x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, 
cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 64x256x64_64x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 64x128x64_32x64x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 128x64x64_64x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f16t_f16t_f32t_tensor_op_f32, 64x64x64_32x32x64) { + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, + cutlass::layout::RowMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_volta_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_volta_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..b2a23970c1437d74e925a7796c578f635f6839a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_volta_tensor_op_f32_sm70.cu @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_volta_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_volta_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_volta_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_volta_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + 
cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_volta_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_volta_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ENABLE_TENSOR_CORE_MMA) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_wmma_tensor_op_f32_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_wmma_tensor_op_f32_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..45723563dc63b6cdc4ae8d570dd1efbdeabf8836 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16t_f32t_wmma_tensor_op_f32_sm70.cu @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 16x16x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + 
cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 128x256x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 
16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 256x128x32_64x64x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 128x64x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 64x128x32_64x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 64x64x32_32x32x32_16x16x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 32x8x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// 
+TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_32x8x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x32x16, DataType/Instruction = F16*F16+F32=>F32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_Device_Gemm_f16t_f16t_f32t_wmma_tensor_op_f32, 128x128x32_64x64x32_8x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_bf16_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_bf16_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..c83f7a70ff11542bb73a82a7773b14ae04a27ee5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_bf16_f32_sm80.cu @@ -0,0 +1,93 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface using BF16. +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/arch/mma.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 4, + 4, + false, + cutlass::arch::OpMultiplyAddFastBF16 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..bd71b19669438a9e5e75819fb53751055da0d952 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,88 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + 
cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..895f175bba09d38662c92f9e16d7b6a991c22782 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,429 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 128x128x64_64x64x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 256x64x64_64x64x64) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 128x64x64_64x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32n_f32t_tensor_op_f32, 64x64x64_32x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32t_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32t_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..c37d48cb393b22676b0f7193d08586f7bf259490 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32n_f32t_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,429 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + 
+TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + 
+TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 128x128x64_64x64x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 256x64x64_64x64x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 128x64x64_64x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32n_f32t_f32t_tensor_op_f32, 64x64x64_32x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32t_f32n_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32t_f32n_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..43bb12967ccd4d7807d67db0154173264991a628 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32t_f32n_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,428 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 128x128x64_64x64x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 256x64x64_64x64x64) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 128x64x64_64x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32n_f32t_tensor_op_f32, 64x64x64_32x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32t_f32t_f32t_tensor_op_f32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32t_f32t_f32t_tensor_op_f32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..45008355f1ea7d4a3c9709ff529bf24c2f94f08e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f32t_f32t_f32t_tensor_op_f32_sparse_sm80.cu @@ -0,0 +1,429 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + 
+TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 
32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + 
+TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 128x128x64_64x64x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 256x64x64_64x64x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 128x64x64_64x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 
64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_f32t_f32t_f32t_tensor_op_f32, 64x64x64_32x32x64) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::SparseGemm< + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..d733d8d313b0b98f056a6a1b1c371ab6c2c722ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64an_f64at_f64at_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2RowMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + double, + LayoutA, + double, + LayoutB, + ElementOutput, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename 
LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..6d704cc03f9e4770ac5d3484b70d4b5a225ae828 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu @@ -0,0 +1,222 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface with Hopper FP64 +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 32x32x16_16x16x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 64x64x16_32x32x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 128x64x16_64x32x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 64x128x16_32x64x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64n_f64t_f64t_tensor_op_f64, 128x128x16_32x64x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64t_f64n_f64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64t_f64n_f64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..8961ab784308688b4cc59ce330e97ec2234234d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64t_f64n_f64t_tensor_op_f64_sm80.cu @@ -0,0 +1,259 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 
4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = 
double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64at_f64an_f64at_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using LayoutA = cutlass::layout::AffineRank2RowMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + 
double, + LayoutA, + double, + LayoutB, + ElementOutput, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64t_f64n_f64t_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64t_f64n_f64t_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..211d3bfdddd259eb3bf5750877acc93aa7463517 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_f64t_f64n_f64t_tensor_op_f64_sm90.cu @@ -0,0 +1,222 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with Hopper FP64 +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 32x32x16_16x16x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 64x64x16_32x32x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 64x128x16_32x64x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 128x64x16_64x32x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f64t_f64n_f64t_tensor_op_f64, 128x128x16_32x64x16_16x8x4) { + + using ElementOutput = double; + using ElementAccumulator = double; + using ElementCompute = double; + + using Gemm = cutlass::gemm::device::Gemm< + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_grouped_scheduler_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_grouped_scheduler_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4fed1dc10ec4bfce38962d28bf1c3d5fe03228d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_grouped_scheduler_sm80.cu @@ -0,0 +1,222 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped GEMM problem visitors +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm_grouped.h" +#include "cutlass/gemm/kernel/default_gemm_grouped.h" +#include "cutlass/gemm/device/gemm_grouped.h" + +#include "testbed_grouped_scheduler.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Run a series of tests on the testbed +template +void run_tests() { + for (int scale_factor : {8, 16, 32, 64}) { + for (int threadblock_count : {54, 108, 216, 324, 432}) { + for (int problems : {1, 27, 180, 300}) { + Testbed testbed; + testbed.run(problems, threadblock_count, scale_factor); + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p128_t128, 64x64x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + static int const kNumPrefetch = 128; + static int const kThreadCount = 128; + static bool const kTranspose = false; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. 
+ cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p128_t128_transpose, 64x64x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + static int const kNumPrefetch = 128; + static int const kThreadCount = 128; + static bool const kTranspose = true; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. + cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p256_t256, 64x64x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + static int const kNumPrefetch = 256; + static int const kThreadCount = 256; + static bool const kTranspose = false; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. 
+ cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p256_t128, 64x64x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + static int const kNumPrefetch = 256; + static int const kThreadCount = 128; + static bool const kTranspose = false; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. + cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p256_t256, 64x32x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 32>; + static int const kNumPrefetch = 256; + static int const kThreadCount = 256; + static bool const kTranspose = false; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. 
+ cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p256_t256_transpose, 64x32x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 32>; + static int const kNumPrefetch = 256; + static int const kThreadCount = 256; + static bool const kTranspose = true; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. + cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p256_t256, 32x64x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 32>; + static int const kNumPrefetch = 256; + static int const kThreadCount = 256; + static bool const kTranspose = false; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. 
+ cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmGroupedScheduler_p256_t256_transpose, 32x64x32) { + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 32>; + static int const kNumPrefetch = 256; + static int const kThreadCount = 256; + static bool const kTranspose = true; + + using Testbed = test::gemm::device::TestbedGroupedGemmScheduler< + ThreadblockShape, + kNumPrefetch, + kThreadCount, + kTranspose, + // List of GroupScheduleModes to compare. List must contain at least two. + cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, + cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>; + run_tests(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..83e7cfa2de53dc9f6be6aff587799c863e4f62e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm70.cu @@ -0,0 +1,353 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-level GEMM API for Planar Complex. 
+*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "testbed_planar_complex.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s884_tn_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s884_tn : gemm_planar_complex_s884_tn_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16t_f16n_f32n_tensor_op_f32_884, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s884_nt_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + 
cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s884_nt : gemm_planar_complex_s884_nt_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16n_f16t_f32n_tensor_op_f32_884, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s884_nn_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s884_nn : gemm_planar_complex_s884_nn_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16n_f16n_f32n_tensor_op_f32_884, 128x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s884_f16_nn_128x64_32x2_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + cutlass::half_t, + 8, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s884_f16_nn_128x64_32x2 : gemm_planar_complex_f16_s884_f16_nn_128x64_32x2_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16n_f16n_f16n_tensor_op_f32_884, 128x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s884_f16_nn_64x128_32x2_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + cutlass::half_t, + 8, + 
float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s884_f16_nn_64x128_32x2 : gemm_planar_complex_f16_s884_f16_nn_64x128_32x2_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16n_f16n_f16n_tensor_op_f32_884, 64x128x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s884_f16_tt_128x64_32x2_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + cutlass::half_t, + 8, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s884_f16_tt_128x64_32x2 : gemm_planar_complex_f16_s884_f16_tt_128x64_32x2_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16t_f16t_f16n_tensor_op_f32_884, 128x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s884_f16_tt_64x128_32x2_base = 
typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + cutlass::half_t, + 8, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s884_f16_tt_64x128_32x2 : gemm_planar_complex_f16_s884_f16_tt_64x128_32x2_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmPlanarComplex_f16t_f16t_f16n_tensor_op_f32_884, 64x128x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..1f702ab00ca681dcf0e103310a03b0cd6d8e4b1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm75.cu @@ -0,0 +1,223 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-level GEMM API for Planar Complex. 
+*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h" +#include "cutlass/gemm/device/gemm_universal_base.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "testbed_planar_complex.h" + + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s1688_tn_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s1688_tn : gemm_planar_complex_s1688_tn_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmPlanarComplex_f16t_f16n_f32n_tensor_op_f32_1688, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s1688_hc_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kConjugate, + 8, + float, + cutlass::layout::RowMajor, + 
float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s1688_hc : gemm_planar_complex_s1688_hc_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmPlanarComplex_f16h_f16c_f32n_tensor_op_f32_1688, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s1688_nt_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s1688_nt : gemm_planar_complex_s1688_nt_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmPlanarComplex_f16n_f16t_f32n_tensor_op_f32_1688, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s1688_ch_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s1688_ch : gemm_planar_complex_s1688_ch_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmPlanarComplex_f16c_f16h_f32n_tensor_op_f32_1688, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..beed868fc1df68dfd2083f4250c7ad36b490568b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_planar_complex_f16_f16_f32_tensor_op_sm80.cu @@ -0,0 +1,393 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-level GEMM API for Planar Complex. 
+*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "testbed_planar_complex.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s16816_tn_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s16816_tn : gemm_planar_complex_s16816_tn_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16t_f16n_f32n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s16816_tn_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s16816_tn : gemm_planar_complex_f16_s16816_tn_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16t_f16n_f16n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s16816_hc_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kConjugate, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s16816_hc : gemm_planar_complex_s16816_hc_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16h_f16c_f32n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s16816_hc_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s16816_hc : gemm_planar_complex_f16_s16816_hc_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16h_f16c_f16n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s16816_nt_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 
+ 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s16816_nt : gemm_planar_complex_s16816_nt_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16n_f16t_f32n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_f16_s16816_nt_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_f16_s16816_nt : gemm_planar_complex_f16_s16816_nt_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16n_f16t_f16n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_s16816_ch_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + 
cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + 8, + float, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_s16816_ch : gemm_planar_complex_s16816_ch_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16c_f16h_f32n_tensor_op_f32_16816, 64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} + +//////////////////////////////////////////////////////////////////////////////// + +using gemm_planar_complex_cf16_s16816_ch_base = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + 8, + cutlass::half_t, + cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + float, + 4, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::arch::OpMultiplyAdd +>::GemmKernel; + +struct gemm_planar_complex_cf16_s16816_ch : gemm_planar_complex_cf16_s16816_ch_base { + +}; + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmPlanarComplex_f16c_f16h_f16n_tensor_op_f32_16816, 
64x64x32_32x32x32) { + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + EXPECT_TRUE(test::gemm::device::TestAllGemmPlanarComplex()); +} +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4n_s4t_s4n_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4n_s4t_s4n_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..e281e9f40f3aea2b7ba3b2f882de11d6360b3503 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4n_s4t_s4n_tensor_op_s32_sm75.cu @@ -0,0 +1,197 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_interleaved.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 64x128x128_32x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 128x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 256x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + 
ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 128x256x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4n_s4t_s4n_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4n_s4t_s4n_tensor_op_s32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..0d95c50d6e5f385eb84e09ee197c98e265cc3765 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4n_s4t_s4n_tensor_op_s32_sm80.cu @@ -0,0 +1,215 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "multistage_testbed_interleaved.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 64x128x128_32x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 32, + 32, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 128x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + 
using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 32, + 32, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 256x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 32, + 32, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} 
+ +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s4n_s4t_s4n_tensor_op_s32, 128x256x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::ColumnMajorInterleaved<64>, + cutlass::int4b_t, + cutlass::layout::RowMajorInterleaved<64>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<64>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 32, + 32, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..bb11ec4e3ac1cd82bb8af7a5615278d18cfa2501 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu @@ -0,0 +1,247 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x256x128_64x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 256x128x128_64x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x128x128_64x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x128x128_32x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x64x128_64x32x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x64x128_32x32x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..0c028e06b3304954e65cf39f85264721edb66357 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu @@ -0,0 +1,360 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x256x256_64x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 256x128x256_64x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x128x256_64x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 256x64x256_64x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x256x256_64x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x128x256_32x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 256>, + cutlass::gemm::GemmShape<32, 64, 256>, 
cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x64x256_64x32x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 256>, + cutlass::gemm::GemmShape<64, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x64x256_32x32x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<32, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x256x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 256x128x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x128x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, 
ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 256x64x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x256x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / 
cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x128x128_32x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 128x64x128_64x32x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s4t_s4n_s32n_tensor_op_s32, 64x64x128_32x32x128) { + using ElementOutput = int32_t; + using ElementAccumulator = 
int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif //#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_wmma_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_wmma_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..cf8d766808eba357a790e7e2b5f5110e1e7902fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32n_wmma_tensor_op_s32_sm75.cu @@ -0,0 +1,246 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x8x32, DataType/Instruction = s4 * s4 + s32 => s32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s4t_s4n_s32n_wmma_tensor_op_s32, 128x256x128_64x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_wmma_tensor_op_s32, 256x128x128_64x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_wmma_tensor_op_s32, 128x128x128_64x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_wmma_tensor_op_s32, 64x128x128_32x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + 
cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_wmma_tensor_op_s32, 128x64x128_64x32x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32n_wmma_tensor_op_s32, 64x64x128_32x32x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..a2e9199d6a575bf9f04bb05f2ba168ce235df528 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sm75.cu @@ -0,0 +1,247 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x256x128_64x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 256x128x128_64x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x128x128_64x64x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x128x128_32x64x128) { + + using ElementOutput = int32_t; + 
using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x64x128_64x32x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x64x128_32x32x128) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..d96224936b1f3a54510475441161a4d524ec85d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sm80.cu @@ -0,0 +1,363 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x256x256_64x64x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 256x128x256_64x64x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 256>, + cutlass::gemm::GemmShape<64, 
64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x128x256_64x64x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 256x64x256_64x64x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); 
+} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x256x256_64x64x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x128x256_32x64x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 256>, + cutlass::gemm::GemmShape<32, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x64x256_64x32x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + 
cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 256>, + cutlass::gemm::GemmShape<64, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x64x256_32x32x256, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<32, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x256x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + 
cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 256x128x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x128x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 256x64x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x256x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x256x128_32x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + 
cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 128x64x128_64x32x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s4t_s4n_s32t_tensor_op_s32, 64x64x128_32x32x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + 
cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_wmma_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_wmma_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..a989f4ea378f614ec72337a0f6ab6bcbb04d9b5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s32t_wmma_tensor_op_s32_sm75.cu @@ -0,0 +1,246 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" +///////////////////////////////////////////////////////////////////////////////////////////////// +///////// WMMA Instruction Shape = 8x8x32, DataType/Instruction = s4 * s4 + s32 => s32 ////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s4t_s4n_s32t_wmma_tensor_op_s32, 128x256x128_64x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + 
using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_wmma_tensor_op_s32, 256x128x128_64x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_wmma_tensor_op_s32, 128x128x128_64x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + 
cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_wmma_tensor_op_s32, 64x128x128_32x64x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_wmma_tensor_op_s32, 128x64x128_64x32x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s32t_wmma_tensor_op_s32, 64x64x128_32x32x128_8x8x32) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4n_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4n_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..e32c48205c6a1a341cd327be6b78c2021887c688 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4n_tensor_op_s32_sm75.cu @@ -0,0 +1,343 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x256x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 256x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32_align8, 256x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 8, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x256x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 256x64x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 32 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x128x128_32x64x128) { + + using ElementOutput = cutlass::int4b_t; + using 
ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x64x128_64x32x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 32 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x64x128_32x32x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 32 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4n_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4n_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..bce66df78a90d305bc265e927cc21063704aec67 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4n_tensor_op_s32_sm80.cu @@ -0,0 +1,394 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "multistage_testbed.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x256x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 256x128x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x128x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 256x64x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, 
ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x256x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x128x256_32x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 256>, + cutlass::gemm::GemmShape<32, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + 
+CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x64x256_64x32x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 256>, + cutlass::gemm::GemmShape<64, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x64x256_32x32x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<32, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x256x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 256x128x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32_align8, 256x128x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 8, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x128x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 256x64x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / 
cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x256x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x128x128_32x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + 
+CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 128x64x128_64x32x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4n_tensor_op_s32, 64x64x128_32x32x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +#endif // #if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..f2ce0da3222879f1e8ecafd62f5f736b8127bbf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm75.cu @@ -0,0 +1,343 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x256x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 256x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32_align8, 256x128x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 8, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x128x128_64x64x128) { + + using ElementOutput = 
cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x256x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 256x64x128_64x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + 
cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 32 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x128x128_32x64x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x64x128_64x32x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + 
cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 32 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +TEST(SM75_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x64x128_32x32x128) { + + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, + cutlass::layout::RowMajor, + cutlass::int4b_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<8, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 32 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..6d62511ba131d51e1716202fb980344167f4d2d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm80.cu @@ -0,0 +1,394 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "multistage_testbed.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x256x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 256x128x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<256, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x128x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 256x64x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, 
ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x256x256_64x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x128x256_32x64x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 256>, + cutlass::gemm::GemmShape<32, 64, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + 
+CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x64x256_64x32x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 256>, + cutlass::gemm::GemmShape<64, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x64x256_32x32x256, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<32, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x256x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = 
cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 256x128x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32_align8, 256x128x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 8, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x128x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 256x64x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / 
cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x256x128_64x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x128x128_32x64x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + 
+CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 128x64x128_64x32x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +CUTLASS_TEST_L0(SM80_Device_Gemm_s4t_s4n_s4t_tensor_op_s32, 64x64x128_32x32x128, { + using ElementOutput = cutlass::int4b_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::int4b_t, cutlass::layout::RowMajor, cutlass::int4b_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 32 / cutlass::sizeof_bits::value, ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + test::gemm::device::MultistageTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +#endif // #if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8n_s8t_s8n_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8n_s8t_s8n_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..ef0100a4202e09e0210ab87d6dba30e39d4ac0dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8n_s8t_s8n_tensor_op_s32_sm75.cu @@ -0,0 +1,291 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_interleaved.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 32x64x64_16x32x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<16, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / 
cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 64x64x64_32x32x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 128x64x64_64x32x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 64x128x64_32x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 128x128x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + 
test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 256x128x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 128x256x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + test::gemm::device::InterleavedTestbed testbed; + + 
EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8n_s8t_s8n_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8n_s8t_s8n_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..5dbb51e442bd29fc6b68169585e9d62e050c76e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8n_s8t_s8n_tensor_op_s32_sm80.cu @@ -0,0 +1,358 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "multistage_testbed_interleaved.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 64x64x64_32x32x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / 
cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 128x64x64_64x32x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 64x128x64_32x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + 
cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 128x128x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 256x128x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + 
cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 64x256x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 256x64x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32, 128x256x64_64x64x64) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajorInterleaved<32>, + int8_t, + cutlass::layout::RowMajorInterleaved<32>, + ElementOutput, + cutlass::layout::ColumnMajorInterleaved<32>, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 16, + 16, + false, + cutlass::arch::OpMultiplyAddSaturate + >; + + test::gemm::device::MultistageInterleavedTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu new file mode 100644 index 
0000000000000000000000000000000000000000..3d304510095716fb340a5c2b0162f2a65c4766a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu @@ -0,0 +1,247 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x256x64_64x64x64) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 256x128x64_64x64x64) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x128x64_64x64x64) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x128x64_32x64x64) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x64x64_64x32x64) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x64x64_32x32x64) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4ce7d707219d702a8dbbc22099d76fde3e200baf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu @@ -0,0 +1,361 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x256x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 256x128x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x128x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 256x64x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, 
cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x256x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x128x128_32x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + 
ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x64x128_64x32x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x64x128_32x32x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x256x64_64x64x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using 
ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 256x128x64_64x64x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x128x64_64x64x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 
8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 256x64x64_64x64x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x256x64_64x64x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x128x64_32x64x64) { + using ElementOutput = int32_t; 
+ using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 128x64x64_64x32x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32, 64x64x64_32x32x64) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::ColumnMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + 
cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_wmma_tensor_op_s32_sm72.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_wmma_tensor_op_s32_sm72.cu new file mode 100644 index 0000000000000000000000000000000000000000..251e1389d2d01ae5a11ddb1d398311c7826309c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32n_wmma_tensor_op_s32_sm72.cu @@ -0,0 +1,151 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 16x16x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s32n_wmma_tensor_op_s32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + 
int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s32n_wmma_tensor_op_s32, 64x128x64_32x32x64_16x16x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 8x32x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s32n_wmma_tensor_op_s32, 64x128x64_32x64x64_8x32x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + 
cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM72_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..9ff2bccdc12f50001560f9627026dea536909827 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sm80.cu @@ -0,0 +1,361 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x256x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 256x128x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, 
cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x128x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 256x64x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x256x128_64x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x128x128_32x64x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x64x128_64x32x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, 
+ ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x64x128_32x32x128, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x256x64_64x64x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 256x128x64_64x64x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x128x64_64x64x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 256x64x64_64x64x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< 
+ int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x256x64_64x64x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x128x64_32x64x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + 
cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x64x64_64x32x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L1(SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x64x64_32x32x64, { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<16, 8, 32>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// + +#endif // 
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..c2f46a2a91c1abba9c5a747d44a3f2e563c643f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu @@ -0,0 +1,269 @@ +/************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_sparse.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x256x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, 
ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 256x128x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x128x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 256x64x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + 
+ using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x256x128_64x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 128>, + cutlass::gemm::GemmShape<64, 64, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x128x128_32x64x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 128>, + cutlass::gemm::GemmShape<32, 64, 128>, 
cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x64x128_64x32x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 128>, + cutlass::gemm::GemmShape<64, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x64x128_32x32x128) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 128>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + 
+TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x128x256_64x64x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 256>, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 128x64x256_64x32x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 256>, + cutlass::gemm::GemmShape<64, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + +TEST(SM80_Device_Sparse_Gemm_s8t_s8n_s32t_tensor_op_s32, 64x64x256_32x32x256) { + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + using ElementCompute = int32_t; + + using Gemm = cutlass::gemm::device::SparseGemm< + int8_t, cutlass::layout::RowMajor, int8_t, + cutlass::layout::ColumnMajor, ElementOutput, 
cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 256>, + cutlass::gemm::GemmShape<32, 32, 256>, cutlass::gemm::GemmShape<16, 8, 64>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 6>; + + EXPECT_TRUE(test::gemm::device::TestAllSparseGemm()); +} + + +//////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_wmma_tensor_op_s32_sm72.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_wmma_tensor_op_s32_sm72.cu new file mode 100644 index 0000000000000000000000000000000000000000..08ae460a75691fa5a3ca8ba693f11a71e2855562 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s32t_wmma_tensor_op_s32_sm72.cu @@ -0,0 +1,186 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 16x16x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s32t_wmma_tensor_op_s32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM75_Device_Gemm_s8t_s8n_s32t_wmma_tensor_op_s32, 64x128x64_32x32x64_16x16x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + 
ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 32x8x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s32t_wmma_tensor_op_s32, 64x128x64_32x64x64_32x8x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 8x32x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s32t_wmma_tensor_op_s32, 
64x128x64_32x64x64_8x32x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM72_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8n_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8n_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..2cebb98b32a9a08538eda224bbee4b652b4140a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8n_tensor_op_s32_sm75.cu @@ -0,0 +1,214 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32, 128x256x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32, 256x128x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + 
cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32_align8, 256x128x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 8>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32, 128x128x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32, 64x128x64_32x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = 
cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::ColumnMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32, 128x64x64_64x32x64, { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8n_tensor_op_s32, 64x64x64_32x32x64, { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<8, 8, 16>, + 
cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 64 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +} ) + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8n_wmma_tensor_op_s32_sm72.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8n_wmma_tensor_op_s32_sm72.cu new file mode 100644 index 0000000000000000000000000000000000000000..e0ffc83cc2c57517b5d42874cdb0cc74516387fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8n_wmma_tensor_op_s32_sm72.cu @@ -0,0 +1,177 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 16x16x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s8n_wmma_tensor_op_s32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, 
+ cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_s8t_s8n_s8n_wmma_tensor_op_s32, 64x128x64_32x32x64_16x16x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 32x8x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s8n_wmma_tensor_op_s32, 64x128x64_32x64x64_32x8x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + 
ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 8x32x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s8n_wmma_tensor_op_s32, 64x128x64_32x64x64_8x32x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM72_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8t_tensor_op_s32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8t_tensor_op_s32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..0a4706d73d4436fbffaca76f9c3841df9eebef46 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8t_tensor_op_s32_sm75.cu @@ -0,0 +1,188 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32, 128x256x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32, 256x128x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + 
cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_align8, 256x128x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 8>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32, 128x128x64_64x64x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 64>, + cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32, 64x128x64_32x64x64, { + using ElementOutput = 
int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, 128 / cutlass::sizeof_bits::value>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32, 128x64x64_64x32x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 64, 64>, + cutlass::gemm::GemmShape<64, 32, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + test::gemm::device::Testbed testbed; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +CUTLASS_TEST_L0(SM75_Device_Gemm_s8t_s8n_s8t_tensor_op_s32, 64x64x64_32x32x64, { + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, + ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 64>, + 
cutlass::gemm::GemmShape<32, 32, 64>, cutlass::gemm::GemmShape<8, 8, 16>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, 64 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementCompute>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; + + test::gemm::device::Testbed testbed; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu new file mode 100644 index 0000000000000000000000000000000000000000..12ab891c4439c79204f872792babc3b7ff6b8987 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu @@ -0,0 +1,178 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 16x16x16, DataType/Instruction = s8*s8+s32=>s8 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s8t_wmma_tensor_op_s32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + 
cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM75_Device_Gemm_s8t_s8n_s8t_wmma_tensor_op_s32, 64x128x64_32x32x64_16x16x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 32x8x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s8t_wmma_tensor_op_s32, 64x128x64_32x64x64_32x8x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 8x32x16, DataType/Instruction = s8*s8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_s8t_s8n_s8t_wmma_tensor_op_s32, 64x128x64_32x64x64_8x32x16) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<8, 32, 16>, + cutlass::epilogue::thread::FastLinearCombinationClamp< + ElementOutput, + 128 / cutlass::sizeof_bits::value + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM72_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_serial_tensor_op_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_serial_tensor_op_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..30f0bb7a1d8357ee58802008f898505ceb8797be --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_serial_tensor_op_sm75.cu @@ -0,0 +1,114 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmSplitKSerial_f16n_f16n_f16t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + static const int kStages = 2; + + static const int kAlignmentA = cutlass::gemm::device::DefaultGemmConfiguration< + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + ElementA, + ElementB, + ElementOutput, + ElementAccumulator>::kAlignmentA; + + static const int kAlignmentB = cutlass::gemm::device::DefaultGemmConfiguration< + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + ElementA, + ElementB, + ElementOutput, + ElementAccumulator>::kAlignmentB; + + static const bool kSplitKSerial = true; + + using Gemm = cutlass::gemm::device::Gemm< + ElementA, + cutlass::layout::ColumnMajor, + ElementB, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombinationRelu< + ElementOutput, + 128 / 
cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + kStages, + kAlignmentA, + kAlignmentB, + kSplitKSerial + >; + + bool result = test::gemm::device::TestAllGemm(); + EXPECT_TRUE(result); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_simt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_simt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..1b4eba2dbc371c37ffe1ac79842b8d290bf883a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_simt_sm50.cu @@ -0,0 +1,146 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_splitk_parallel.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_splitk.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_GemmSplitKParallel_f32n_f32t_f32t_simt_f32, 128x128x8) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + cutlass::gemm::GemmShape<128, 128, 8>, + 
cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM50_Device_GemmSplitKParallel_f32n_f32n_f32n_simt_f32, 128x128x8) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_GemmSplitKParallel_f64n_f64n_f64t_simt_f64, 64x128x8) { + + using Element = double; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + cutlass::gemm::GemmShape<64, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM50_Device_GemmSplitKParallel_f64t_f64t_f64n_simt_f64, 64x64x8) { + + using Element = double; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_tensor_op_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_tensor_op_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..3d8e8db2582e0b4c3c441fc475cee5bde44719d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_splitk_tensor_op_sm75.cu @@ -0,0 +1,336 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_splitk_parallel.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_splitk.h" + +// These operators are assert(0) unless extended PTX is used. 
+#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmSplitKParallel_f16n_f16t_f32t_tensor_op_f32, 64x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16n_f16t_f32n_tensor_op_f32, 64x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16n_f16t_f16t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); 
+} + +TEST(SM75_Device_GemmSplitKParallel_f16n_f16t_f16n_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16n_f16t_f16t_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16n_f16t_f16n_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmSplitKParallel_f16t_f16n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16t_f16n_f32n_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16t_f16n_f16t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + 
+TEST(SM75_Device_GemmSplitKParallel_f16t_f16n_f16n_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16t_f16n_f16t_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +TEST(SM75_Device_GemmSplitKParallel_f16t_f16n_f16n_tensor_op_f16, 64x128x32_32x64x32) { + + using ElementOutput = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + + using Gemm = cutlass::gemm::device::GemmSplitKParallel< + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8> + >; + + test::gemm::device::TestAllGemmSplitK(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// 
+ +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_testbed_3x_evt.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_testbed_3x_evt.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9127d40786f856ead851d596d95b4c8dbbe18d0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_testbed_3x_evt.hpp @@ -0,0 +1,1458 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Testbed and host reference for EVT unittest +*/ + + +#pragma once +#include "gemm_testbed_3x.hpp" + +namespace test { +namespace gemm { +namespace device { + +/// Host-side tapply, tapply in cute is HOST_DEVICE +template +constexpr auto +tapply(T&& t, F&& f, G&& g, cute::seq) +{ + return g(f(std::get(static_cast(t)))...); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT: Base class for EVT Node + +template < + typename Gemm_ +> +class HostEVTNodeBase { +public: + using Gemm = Gemm_; + using TestBedImpl = typename detail::TestbedImpl; + using Kernel = typename Gemm::GemmKernel; + using Epilogue = typename Kernel::CollectiveEpilogue; + using ElementCompute = typename TestBedImpl::ElementCompute; + using ElementScalar = typename TestBedImpl::ElementScalar; + using ElementAccumulator = typename Kernel::ElementAccumulator; + using ElementC = typename Kernel::ElementC; + using ElementD = typename Kernel::ElementD; + + using LayoutTagC = typename TestBedImpl::LayoutTagC; + using LayoutTagD = typename TestBedImpl::LayoutTagD; +private: + bool _check_relative_equality; + // Factors used for calculating relative equality. These default + // values are borrowed from those used by default in the CUTLASS + // profiler for performing relative equality checks. 
+ float _epsilon = 0.05f; + float _nonzero_floor = 1.0f / 256.0f; + +public: + HostEVTNodeBase(){} + HostEVTNodeBase(bool check_relative_equality): + _check_relative_equality(check_relative_equality) { } + + + template < + class Element, + class Layout + > + bool equality_check( + cutlass::TensorView const& lhs, + cutlass::TensorView const& rhs) const { + if (_check_relative_equality) { + return cutlass::reference::host::TensorRelativelyEquals( + lhs, rhs, Element(_epsilon), Element(_nonzero_floor) + ); + } + else { + return cutlass::reference::host::TensorEquals(lhs, rhs); + } + } + + void* get_tensor_C_ptr() { + return nullptr; + } + + void* get_tensor_D_ptr() { + return nullptr; + } + + bool compare_reference(std::stringstream& error_ss) { + return true; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Accumulator + +template < + typename Gemm +> +class HostAccumulator: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using TestBedImpl = typename Base::TestBedImpl; + using ElementAccumulator = typename Base::ElementAccumulator; + using ElementCompute = typename Base::ElementCompute; + + struct Arguments { }; + +private: + cutlass::NumericConverter accumulator_converter; +public: + HostAccumulator(){} + template + HostAccumulator(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :Base(check_relative_equality) {} + + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + + return accumulator_converter(acc); + } + + Arguments get_arguments() { + return Arguments{}; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Scalar Broadcast + +template < + typename Gemm, + int Value, + int BroadcastCount = 1, + template class ReductionFn = cutlass::multiplies +> +class HostScalarBroadcast : public HostEVTNodeBase { +public: + using 
Base = HostEVTNodeBase; + using ElementCompute = typename Base::ElementCompute; + + struct Arguments { + ElementCompute scalar[BroadcastCount]; + ElementCompute const* scalar_ptrs[BroadcastCount]; + cute::Stride dScalar; + }; +private: + ElementCompute _scalar; +public: + HostScalarBroadcast(){} + template + HostScalarBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :_scalar(ElementCompute(Value)), Base(check_relative_equality) {} + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + + return _scalar; + } + + bool compare_reference(std::stringstream& error_ss) { + error_ss << "Scalar: " << float(_scalar) << "\n\n"; + return true; + } + + Arguments get_arguments() { + if constexpr (BroadcastCount == 1) + return Arguments{{_scalar}, {nullptr}}; + else if constexpr (BroadcastCount == 2) + return Arguments{{_scalar, _scalar}, {nullptr, nullptr}}; + else if constexpr (BroadcastCount == 3) + return Arguments{{_scalar, _scalar, _scalar}, {nullptr, nullptr, nullptr}}; + else + return Arguments{{_scalar}, {nullptr}}; + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Row Broadcast +template < + typename Gemm, + typename ElementBias_=void +> +class HostRowBroadcast: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using ElementBias = std::conditional_t, + typename Base::ElementC, + ElementBias_>; + + using TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + using LayoutTagVector = cutlass::layout::PackedVectorLayout; + + struct Arguments { + ElementBias const* ptr_row = nullptr; + ElementBias null_default = ElementBias(0); + cute::Stride dRow = {}; + }; +private: + cutlass::NumericConverter _bias_converter; + cutlass::HostTensor _bias; + int _N; + TestBedImpl impl_; +public: + HostRowBroadcast(){} + template + 
HostRowBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :impl_(impl), Base(check_relative_equality) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + _N = cute::get<1>(problem_shape_MNKL); + _bias.resize(cutlass::Coord<1>(_N)); + + EXPECT_TRUE( + impl_.initialize_tensor( + _bias.host_view(), cutlass::Distribution::Uniform, + impl_.seed + 2023 + ) + ); + _bias.sync_device(); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + auto TensorBias = cute::make_tensor(_bias.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}, _N))); + + return _bias_converter(TensorBias(1, n + n_b)); + } + + bool compare_reference(std::stringstream& error_ss) { + error_ss + << "PerColumnBias = \n" << _bias.host_view() << "\n\n"; + return true; + } + + Arguments get_arguments() { + return {_bias.device_data()}; + } + +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Column Broadcast +template < + typename Gemm, + typename ElementBias_=void +> +class HostColBroadcast: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using ElementBias = std::conditional_t, + typename Base::ElementC, + ElementBias_>; + + using TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + using LayoutTagVector = cutlass::layout::PackedVectorLayout; + + struct Arguments { + ElementBias const* ptr_row = nullptr; + ElementBias null_default = ElementBias(0); + cute::Stride dRow = {}; + }; +private: + cutlass::NumericConverter _bias_converter; + cutlass::HostTensor _bias; + int _M; + TestBedImpl impl_; +public: + HostColBroadcast(){} + template + HostColBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :impl_(impl), Base(check_relative_equality) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 
1); + _M = cute::get<0>(problem_shape_MNKL); + _bias.resize(cutlass::Coord<1>(_M)); + + EXPECT_TRUE( + impl_.initialize_tensor( + _bias.host_view(), cutlass::Distribution::Uniform, + impl_.seed + 2023 + ) + ); + _bias.sync_device(); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + auto TensorBias = cute::make_tensor(_bias.host_data(), + cute::make_layout(cute::make_shape(_M, cute::_1{}))); + + return _bias_converter(TensorBias(m + m_b, 1)); + } + + bool compare_reference(std::stringstream& error_ss) { + error_ss + << "PerRowBias = \n" << _bias.host_view() << "\n\n"; + return true; + } + + Arguments get_arguments() { + return {_bias.device_data()}; + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Aux Load + +template < + typename Gemm, + bool isC=false, + typename ElementAuxLoad_=void, + typename LayoutTagAux_=void +> +class HostAuxLoad: public HostEVTNodeBase { +public: + using ElementAuxLoad = std::conditional_t, + typename HostEVTNodeBase::ElementC, + ElementAuxLoad_>; + using LayoutTagAux = std::conditional_t, + typename HostEVTNodeBase::LayoutTagC, + LayoutTagAux_>; + + using Base = HostEVTNodeBase; + using TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + + using StrideAux = cutlass::gemm::TagToStrideC_t; + struct Arguments_Aux { + ElementAuxLoad const *ptr_aux = nullptr; + ElementAuxLoad null_default = ElementAuxLoad(0); + StrideAux dAux = {}; + }; + + struct Arguments_C {}; + + using Arguments = cute::conditional_t; + +private: + cutlass::NumericConverter _aux_load_converter; + cutlass::HostTensor _tensor_aux_load; + + int _M, _N, _L; + + TestBedImpl impl_; + + StrideAux _stride_aux; +public: + HostAuxLoad(){} + template + HostAuxLoad(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :impl_(impl), Base(check_relative_equality){ 
+ auto problem_shape_NMKL = cute::append<4>(problem_size, 1); + auto [_M, _N, K, _L] = problem_shape_NMKL; + auto aux_coord = cutlass::make_Coord(_M * _L, _N); + _tensor_aux_load.resize( + aux_coord, + cutlass::layout::Affine2Layout_Factory::layout_factory( + aux_coord, typename LayoutTagAux::Stride() + ) + ); + EXPECT_TRUE( + impl_.initialize_tensor( + _tensor_aux_load.host_view(), + cutlass::Distribution::Uniform, + impl_.seed + 2023 + ) + ); + _tensor_aux_load.sync_device(); + _stride_aux = cutlass::make_cute_packed_stride(StrideAux{}, cute::make_shape(_M, _N, _L)); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + + + auto TensorAuxLoad = cute::make_tensor(_tensor_aux_load.host_data(), + cute::make_layout(cute::make_shape(_M, _N, _L), _stride_aux)); + return _aux_load_converter(TensorAuxLoad(m + m_b, n + n_b, l)); + } + + bool compare_reference(std::stringstream& error_ss) { + if constexpr (!isC) { + error_ss + << "AuxLoad = \n" << _tensor_aux_load.host_view()<< "\n\n"; + } + return true; + } + + void* get_tensor_C_ptr() { + if constexpr (isC) { + return static_cast(_tensor_aux_load.device_data()); + } else { + return nullptr; + } + } + + Arguments get_arguments() { + if constexpr (isC) + return {}; + else + return {_tensor_aux_load.device_data(), ElementAuxLoad(0), _stride_aux}; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Compute + +template +T* findNonNullPtr(T* first_ptr) { + return first_ptr; +} + +template +T* findNonNullPtr(T* first_ptr, Args... 
args) { + if (first_ptr) { + return first_ptr; + } + return findNonNullPtr(args...); +} + +template < + typename Gemm, + template class ComputeOp_ +> +class HostCompute: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using ElementCompute = typename Base::ElementCompute; + using ComputeOp = ComputeOp_; + + struct Arguments { + struct OpArgs {} op; + }; +private: + ComputeOp _op; +public: + HostCompute(){} + template + HostCompute(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false): + Base(check_relative_equality) { } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc, Args... frg_inputs) { + return _op(frg_inputs...); + } + + Arguments get_arguments(){ + return {}; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Unary Compute + +template < + typename Gemm, + template class ComputeOp_, + typename Child0 +> +class HostUnaryCompute: public HostEVTNodeBase { +public: + + using Base = HostEVTNodeBase; + using ElementCompute = typename Base::ElementCompute; + using ComputeOp = ComputeOp_; + + struct Arguments { + typename Child0::Arguments child_0_args; + struct OpArgs {} op; + }; +private: + ComputeOp _op; + Child0 _child_0; +public: + HostUnaryCompute(){} + template + HostUnaryCompute(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false): + _child_0(problem_size, impl, check_relative_equality), + Base(check_relative_equality) { } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + ElementCompute child_0_result = _child_0.visit(m, n, l, m_b, n_b, acc); + return _op(child_0_result); + } + + Arguments get_arguments(){ + return { + _child_0.get_arguments(), + {}, + }; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Aux 
Store + +template < + typename Gemm, + bool isD=false, + class ElementAuxStore_=void, + typename LayoutTagAux_=void +> +class HostAuxStore: public HostEVTNodeBase { +public: + using ElementAuxStore = std::conditional_t, + typename HostEVTNodeBase::ElementD, + ElementAuxStore_>; + using LayoutTagAux = std::conditional_t, + typename HostEVTNodeBase::LayoutTagD, + LayoutTagAux_>; + + using Base = HostEVTNodeBase; + using TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + + using StrideAux = cutlass::gemm::TagToStrideC_t; + struct Arguments_Aux { + struct OpArgs { + ElementAuxStore* ptr_aux = nullptr; + StrideAux dAux = {}; + } op; + }; + + struct Arguments_D {}; + + using Arguments = cute::conditional_t; + + +private: + cutlass::NumericConverter destination_converter; + cutlass::HostTensor _tensor_aux_store; + cutlass::HostTensor _reference_aux_store; + int _M, _N, _L; + TestBedImpl impl_; + StrideAux _stride_aux; +public: + HostAuxStore(){} + template + HostAuxStore(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false): + impl_(impl), + Base(check_relative_equality) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + auto [_M, _N, K, _L] = problem_shape_MNKL; + auto aux_coord = cutlass::make_Coord(_M * _L, _N); + _tensor_aux_store.resize( + aux_coord, + cutlass::layout::Affine2Layout_Factory::layout_factory( + aux_coord, typename LayoutTagAux::Stride() + ) + ); + + _reference_aux_store.resize( + aux_coord, + cutlass::layout::Affine2Layout_Factory::layout_factory( + aux_coord, typename LayoutTagAux::Stride() + ) + ); + _tensor_aux_store.sync_device(); + _stride_aux = cutlass::make_cute_packed_stride(StrideAux{}, cute::make_shape(_M, _N, _L)); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc, ElementCompute child_0_result) { + + auto TensorAuxStore = 
cute::make_tensor(static_cast(_reference_aux_store.host_data()), + cute::make_layout(cute::make_shape(_M, _N, _L), _stride_aux)); + TensorAuxStore(m + m_b, n + n_b, l) = destination_converter(child_0_result); + return child_0_result; + } + + bool compare_reference(std::stringstream& error_ss) { + // Verify the store node + _tensor_aux_store.sync_host(); + + bool equal = this->equality_check(_reference_aux_store.host_view(), _tensor_aux_store.host_view()); + if (!equal) { + error_ss + << "\n\nReference =\n" << _reference_aux_store.host_view() + << "\n\nComputed =\n" << _tensor_aux_store.host_view() << "\n\n"; + } + return equal; + } + + void* get_tensor_D_ptr() { + if constexpr (isD) + return static_cast(_tensor_aux_store.device_data()); + else + return nullptr; + } + + Arguments get_arguments() { + if constexpr (isD) { + return {}; + } else { + return {_tensor_aux_store.device_data(), _stride_aux}; + } + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Row Reduce + +template < + typename Gemm, + template class ReduceFn, + typename ElementReduce +> +class HostRowReduce: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + using ElementOutput = typename Base::ElementD; + using LayoutTagVector = cutlass::layout::PackedVectorLayout; + + struct Arguments { + struct OpArgs { + ElementReduce* ptr_row = nullptr; + ElementCompute reduce_identity = 0; + cute::Stride dRow = {}; + } op; + }; + +private: + cutlass::NumericConverter destination_converter; + cutlass::HostTensor _tensor_row_reduce; + cutlass::HostTensor _reduce_buffer; + cutlass::HostTensor _reference_row_reduce; + int _N; + TestBedImpl impl_; + ReduceFn reduce_fn; +public: + HostRowReduce(){} + template + HostRowReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false): + impl_(impl), + 
Base(check_relative_equality) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + _N = cute::get<1>(problem_shape_MNKL); + _tensor_row_reduce.resize(cutlass::Coord<1>(_N)); + _reference_row_reduce.resize(cutlass::Coord<1>(_N)); + _reduce_buffer.resize(cutlass::Coord<1>(_N)); + + _tensor_row_reduce.sync_device(); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc, ElementCompute child_0_result) { + auto TensorRowReduce = cute::make_tensor(_reduce_buffer.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}, _N))); + TensorRowReduce(1, n + n_b) = reduce_fn(TensorRowReduce(1, n + n_b), child_0_result); + return child_0_result; + } + + bool compare_reference(std::stringstream& error_ss) { + // Verify the store node + _tensor_row_reduce.sync_host(); + + auto TensorRowReduce = cute::make_tensor(_reference_row_reduce.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}, _N))); + + auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}, _N))); + + // Filling the reference tensor with the reduce buffer + for (int n = 0; n < _N; n ++) { + TensorRowReduce(1, n) = destination_converter(TensorReduceBuffer(1, n)); + } + + bool equal = this->equality_check(_reference_row_reduce.host_view(), _tensor_row_reduce.host_view()); + if (!equal) { + error_ss + << "\n\nRow Reduce Reference =\n" << _reference_row_reduce.host_view() + << "\n\nRow Reduce Computed =\n" << _tensor_row_reduce.host_view() << "\n\n"; + } + return equal; + } + + Arguments get_arguments() { + return {_tensor_row_reduce.device_data()}; + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Column Reduce + +template < + typename Gemm, + template class ReduceFn, + typename ElementReduce +> +class HostColumnReduce: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using 
TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + using ElementOutput = typename Base::ElementD; + using LayoutTagVector = cutlass::layout::PackedVectorLayout; + + struct Arguments { + struct OpArgs { + ElementReduce* ptr_col = nullptr; + ElementCompute reduce_identity = 0; + cute::Stride dRow = {}; + } op; + }; + +private: + cutlass::NumericConverter destination_converter; + cutlass::HostTensor _tensor_column_reduce; + cutlass::HostTensor _reduce_buffer; + cutlass::HostTensor _reference_column_reduce; + int _M; + TestBedImpl impl_; + ReduceFn reduce_fn; +public: + HostColumnReduce(){} + template + HostColumnReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false): + impl_(impl), + Base(check_relative_equality) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + _M = cute::get<0>(problem_shape_MNKL); + _tensor_column_reduce.resize(cutlass::Coord<1>(_M)); + _reference_column_reduce.resize(cutlass::Coord<1>(_M)); + _reduce_buffer.resize(cutlass::Coord<1>(_M)); + + _tensor_column_reduce.sync_device(); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc, ElementCompute child_0_result) { + auto TensorColReduce = cute::make_tensor(_reduce_buffer.host_data(), + cute::make_layout(cute::make_shape(_M, cute::_1{}))); + TensorColReduce(m + m_b, 1) = reduce_fn(TensorColReduce(m + m_b, 1), child_0_result); + return child_0_result; + } + + bool compare_reference(std::stringstream& error_ss) { + // Verify the store node + _tensor_column_reduce.sync_host(); + + auto TensorColReduce = cute::make_tensor(_reference_column_reduce.host_data(), + cute::make_layout(cute::make_shape(_M, cute::_1{}))); + + auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(), + cute::make_layout(cute::make_shape(_M, cute::_1{}))); + + // Filling the reference tensor with the reduce buffer + for (int m = 0; m < _M; m ++) { + 
TensorColReduce(m, 1) = destination_converter(TensorReduceBuffer(m, 1)); + } + + bool equal = this->equality_check(_reference_column_reduce.host_view(), _tensor_column_reduce.host_view()); + if (!equal) { + error_ss + << "\n\nColumn Reduce Reference =\n" << _reference_column_reduce.host_view() + << "\n\nColumn Reduce Computed =\n" << _tensor_column_reduce.host_view() << "\n\n"; + } + return equal; + } + + Arguments get_arguments() { + return {_tensor_column_reduce.device_data()}; + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// EVT - Scalar Reduce + +template < + typename Gemm, + template class ReduceFn, + typename ElementReduce +> +class HostScalarReduce: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using TestBedImpl = typename Base::TestBedImpl; + using ElementCompute = typename Base::ElementCompute; + using ElementOutput = typename Base::ElementD; + using LayoutTagVector = cutlass::layout::PackedVectorLayout; + + struct Arguments { + struct OpArgs { + ElementReduce* ptr_scalar = nullptr; + ElementCompute reduce_identity = 0; + cute::Stride dScalar = {}; + } op; + }; + +private: + cutlass::NumericConverter destination_converter; + cutlass::HostTensor _tensor_scalar_reduce; + cutlass::HostTensor _reduce_buffer; + cutlass::HostTensor _reference_scalar_reduce; + ReduceFn reduce_fn; + TestBedImpl impl_; +public: + HostScalarReduce(){} + template + HostScalarReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false): + impl_(impl), + Base(check_relative_equality) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + _tensor_scalar_reduce.resize(cutlass::Coord<1>(1)); + _reference_scalar_reduce.resize(cutlass::Coord<1>(1)); + _reduce_buffer.resize(cutlass::Coord<1>(1)); + + _tensor_scalar_reduce.sync_device(); + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc, 
ElementCompute child_0_result) { + auto TensorRowReduce = cute::make_tensor(_reduce_buffer.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}))); + TensorRowReduce(0) = reduce_fn(TensorRowReduce(0), child_0_result); + return child_0_result; + } + + bool compare_reference(std::stringstream& error_ss) { + // Verify the store node + _tensor_scalar_reduce.sync_host(); + + auto TensorRowReduce = cute::make_tensor(_reference_scalar_reduce.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}))); + + auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(), + cute::make_layout(cute::make_shape(cute::_1{}))); + + // Filling the reference tensor with the reduce buffer + TensorRowReduce(0) = destination_converter(TensorReduceBuffer(0)); + + bool equal = this->equality_check(_reference_scalar_reduce.host_view(), _tensor_scalar_reduce.host_view()); + if (!equal) { + error_ss + << "\n\nScalar Reduce Reference =\n" << _reference_scalar_reduce.host_view() + << "\n\nScalar Reduce Computed =\n" << _tensor_scalar_reduce.host_view() << "\n\n"; + } + return equal; + } + + Arguments get_arguments() { + return {_tensor_scalar_reduce.device_data()}; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Host EVT wrapper + +/// The ArgumentPack is used to model the alignment when num ops <= 4 +template +struct ArgumentPack; + +template +struct ArgumentPack { + T arg; + ArgumentPack(T first): + arg(first) {} +}; + +template +struct ArgumentPack { + First arg; + ArgumentPack rest_args; + + ArgumentPack(First first, Rest... rest) : + arg(first), rest_args(rest...) 
{} +}; + + +/// Base class for Host Visitor +template +struct HostVisitorBase: public HostEVTNodeBase { +public: + using Base = HostEVTNodeBase; + using ElementCompute = typename Base::ElementCompute; + + using Arguments_struct = ArgumentPack; + using Arguments_tuple = cute::tuple; + + constexpr static int Rm1 = sizeof...(Ops); + constexpr static bool cond = Rm1 > 4; + using Arguments = cute::conditional_t; + + std::tuple ops; + + HostVisitorBase(){} + template + HostVisitorBase(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :Base(check_relative_equality), + ops(test::gemm::device::tapply(std::tuple{}, + [&] (auto&& op) { + using Op = cute::remove_cvref_t; + return Op(problem_size, impl, check_relative_equality); + }, + [] (auto&&... _ops) { + return std::make_tuple(_ops...); + }, + cute::make_seq{} + )){ } + + bool compare_reference(std::stringstream& error_ss) { + return cute::detail::tapply(ops, + [&](auto& op) { + return op.compare_reference(error_ss); + }, + [&] (auto&&... inputs) { + return arrayAnd(inputs...); + }, + cute::make_seq{} + ); + } + + void* get_tensor_C_ptr() { + return cute::detail::tapply(ops, + [&](auto& op) { + return op.get_tensor_C_ptr(); + }, + [&] (auto&&... inputs) { + return findNonNullPtr(inputs...); + }, + cute::make_seq{} + ); + } + + void* get_tensor_D_ptr() { + return cute::detail::tapply(ops, + [&](auto& op) { + return op.get_tensor_D_ptr(); + }, + [&] (auto&&... inputs) { + return findNonNullPtr(inputs...); + }, + cute::make_seq{} + ); + } + + Arguments get_arguments() { + return test::gemm::device::tapply(ops, + [&](auto& op) { + return op.get_arguments(); + }, + [&] (auto&&... args) { + if constexpr (Rm1 > 4) { + return cute::make_tuple(args...); + } else { + return Arguments(args...); + } + }, + cute::make_seq{} + ); + } + + bool arrayAnd(bool passed) { + return passed; + } + + template + bool arrayAnd(bool first_passed, Args... 
passed) { + if (first_passed) { + return arrayAnd(passed...); + } + return first_passed; + } + +}; + + +/// Tree-struct visitor +template +struct HostTreeVisitor: public HostVisitorBase { +public: + using Gemm = typename NodeOp::Base::Gemm; + using Base = HostVisitorBase; + using ElementCompute = typename Base::ElementCompute; + using Arguments = typename Base::Arguments; + + constexpr static int Rm1 = sizeof...(ChildOps); + + HostTreeVisitor(){} + template + HostTreeVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :Base(problem_size, impl, check_relative_equality){ } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + return cute::detail::tapply(this->ops, + [&] (auto& op) { + return op.visit(m, n, l, m_b, n_b, acc); + }, + [&] (auto&&... frg_inputs) { + return std::get(this->ops).visit(m, n, l, m_b, n_b, acc, frg_inputs...); + }, + cute::make_seq{} + ); + } +}; + + +/// General Graph visitor +template +struct HostTopoVisitor: public HostVisitorBase { +public: + using Base = HostVisitorBase; + using ElementCompute = typename Base::ElementCompute; + constexpr static int Rm1 = Base::Rm1; + using Arguments = typename Base::Arguments; + +private: + ElementCompute frg_outputs[Rm1]; +public: + HostTopoVisitor(){} + template + HostTopoVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :Base(problem_size, impl, check_relative_equality) { } + + template + ElementCompute visit_( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + frg_outputs[I] = cute::transform_apply(cute::get(EdgeTuple{}), + [&] (auto&& _E) { + constexpr int e = cute::remove_cvref_t::value; + return frg_outputs[e]; + }, + [&] (auto const&... 
frg_inputs) { + ElementCompute res = std::get(this->ops).visit(m, n, l, m_b, n_b, acc, frg_inputs...); + return res; + } + ); + + if constexpr (I < Rm1 - 1) { + return visit_(m, n, l, m_b, n_b, acc); + } else { + return frg_outputs[I]; + } + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + + return visit_(m, n, l, m_b, n_b, acc); + } + +}; + + +/// SplitTree visitor +template +struct HostSplitTreeVisitor: public HostVisitorBase { +public: + using Base = HostVisitorBase; + using ElementCompute = typename Base::ElementCompute; + using Arguments = typename Base::Arguments; + + constexpr static int Rm2 = sizeof...(AuxOutTrees); + +private: + ElementCompute frg_input; +public: + HostSplitTreeVisitor(){} + template + HostSplitTreeVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false) + :Base(problem_size, impl, check_relative_equality) { } + + template + void visitAux( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator frag) { + std::get(this->ops).visit(m, n, l, m_b, n_b, frag); + + if constexpr (I < Rm2 - 1) { + return visitAux(m, n, l, m_b, n_b, frag); + } else { + return; + } + } + + template + ElementCompute visit( + int64_t m, int64_t n, int64_t l, int m_b, int n_b, + ElementAccumulator acc) { + + /// Compute the input tree + frg_input = std::get<0>(this->ops).visit(m, n, l, m_b, n_b, acc); + + /// Compute the aux out tree + visitAux(m, n, l, m_b, n_b, frg_input); + /// Visit the output tree + return std::get(this->ops).visit(m, n, l, m_b, n_b, frg_input); + } +}; + +/// Universal testbed for EVT +template +class Testbed3xEVT { +public: + // The EVT Module to test + using EVTModule = typename EVT::EVTModule; + + using TestBedImpl = typename detail::TestbedImpl; + using Kernel = typename Gemm::GemmKernel; + using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue; + using ElementAccumulator = typename Kernel::ElementAccumulator; 
+ using ElementC = typename Kernel::ElementC; + using ElementD = typename Kernel::ElementD; + + using ProblemShapeType = typename Kernel::ProblemShape; + + using LayoutTagA = typename TestBedImpl::LayoutTagA; + using LayoutTagB = typename TestBedImpl::LayoutTagB; + using LayoutTagC = typename TestBedImpl::LayoutTagC; + using LayoutTagD = typename TestBedImpl::LayoutTagD; + + // + // Methods + // + Testbed3xEVT( + bool check_relative_equality_, + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = TestBedImpl::kDefaultSeed + ) : + impl_(init_A_, init_B_, init_C_, seed_), check_relative_equality(check_relative_equality_) { } + + Testbed3xEVT( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = TestBedImpl::kDefaultSeed + ) : + impl_(init_A_, init_B_, init_C_, seed_), check_relative_equality(false) { } + + Testbed3xEVT( + typename LayoutTagA::Stride stride_factor_A_, + typename LayoutTagB::Stride stride_factor_B_, + typename LayoutTagC::Stride stride_factor_C_, + typename LayoutTagD::Stride stride_factor_D_, + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = TestBedImpl::kDefaultSeed + ) : + impl_(stride_factor_A_, + stride_factor_B_, + stride_factor_C_, + stride_factor_D_, + init_A_, + init_B_, + init_C_, + seed_), + check_relative_equality(false) { } + + /// Initializes data structures + void initialize(ProblemShapeType problem_size) { + // + // Allocate the GEMM workspace for A/B tensor + // + impl_.initialize(problem_size); + 
} + // Detail Implementation + TestBedImpl impl_; + + // Whether to use relative equality checks + bool check_relative_equality; + + bool verify(ProblemShapeType problem_size, EVTModule& host_reference) { + + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + auto M = cute::get<0>(problem_shape_MNKL); + auto N = cute::get<1>(problem_shape_MNKL); + auto K = cute::get<2>(problem_shape_MNKL); + auto L = cute::get<3>(problem_shape_MNKL); + auto coord_0 = cutlass::make_Coord(0); + + auto A = cute::make_tensor(impl_.tensor_A.host_data(), + cute::make_layout(cute::make_shape(M, K, L), impl_.stride_a)); + auto B = cute::make_tensor(impl_.tensor_B.host_data(), + cute::make_layout(cute::make_shape(N, K, L), impl_.stride_b)); + auto LayoutD = cute::make_layout(cute::make_shape(M, N, L), impl_.stride_d); + + cutlass::reference::host::GettMainloopParams mainloop_params{A, B}; + + /// Reference Kernel + static int constexpr kBlockM = 64; + static int constexpr kBlockN = 64; + +#if defined(_OPENMP) + #pragma omp parallel for collapse(3) +#endif + for (int64_t l = 0; l < cute::size<2>(mainloop_params.A.layout()); ++l) { + for (int64_t m = 0; m < cute::size<0>(mainloop_params.A.layout()); m += kBlockM) { + for (int64_t n = 0; n < cute::size<0>(mainloop_params.B.layout()); n += kBlockN) { + ElementAccumulator acc[kBlockM][kBlockN]; + gett_mainloop(mainloop_params, m, n, l, acc); + /// Epilogue EVT + for (int n_b = 0; n_b < kBlockN; ++n_b) { + for (int m_b = 0; m_b < kBlockM; ++m_b) { + if (m + m_b < cute::size<0>(LayoutD) && n + n_b < cute::size<1>(LayoutD)) { + host_reference.visit(m, n, l, m_b, n_b, acc[m_b][n_b]); + } + } + } + } + } + } + + std::stringstream error_ss; + bool passed = host_reference.compare_reference(error_ss); + if (!passed) { + std::stringstream fname; + fname << "error_Gemm_device_" + << M << "x" << N << "x" << K << "x" << L << "_" + << cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_" + << cute::get<1>(typename 
Gemm::GemmKernel::TileShape{}) << "_" + << cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt"; + + std::ofstream file(fname.str()); + file + << "problem: " << ' ' << M << "x" << N << "x" << K + << ", Batch count = " << L << "\n\n"; + + file + << "A =\n" << impl_.tensor_A.host_view() + << "\nB =\n" << impl_.tensor_B.host_view() + << "\nC =\n" << impl_.tensor_C.host_view() << "\n\n"; + + file << error_ss.str(); + } + + return passed; + } + + bool run( + ProblemShapeType problem_size, + bool profiling = false, + int iterations = 20, + int splits = 1) { + // Fail test if insufficient CUDA device + if (!impl_.sufficient()) { + std::cout << "Test failed due to insufficient CUDA device." << std::endl; + return false; + } + // + // Initialize the Gemm operator + // + + typename Gemm::Arguments arguments; + cutlass::KernelHardwareInfo hw_info; + hw_info.device_id = 0; + if (not profiling) { + impl_.sm_count = min(impl_.MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id)); + hw_info.sm_count = impl_.sm_count; + } + else { + impl_.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); + hw_info.sm_count = impl_.sm_count; + } + + typename Gemm::GemmKernel::TileScheduler::Arguments scheduler_args; + if constexpr (std::is_same_v) { + scheduler_args = { splits }; + } + + /// Initializes data structures + /// A/B/C/D Tensor + initialize(problem_size); + + /// Initialize the epilogue arguments + EVTModule host_reference(problem_size, impl_, check_relative_equality); + + arguments = typename Gemm::Arguments{ + cutlass::gemm::GemmUniversalMode::kGemm, + problem_size, + { + impl_.tensor_A.device_data(), impl_.stride_a, + impl_.tensor_B.device_data(), impl_.stride_b + }, + { // Epilogue arguments + {}, // thread + static_cast(host_reference.get_tensor_C_ptr()), + impl_.stride_c, + static_cast(host_reference.get_tensor_D_ptr()), + impl_.stride_d + }, // Epilogue arguments end + hw_info, + 
scheduler_args + }; + + // Filling in the thread arguments + typename EVTModule::Arguments epilogue_args = host_reference.get_arguments(); + std::memcpy(&arguments.epilogue.thread, &epilogue_args.arg, sizeof(epilogue_args.arg)); + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.can_implement(arguments); + + if (status != cutlass::Status::kSuccess) { + cudaError_t error = cudaGetLastError(); + std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; + return true; + } + + // + // Run the GEMM + // + if (profiling) { + return impl_.profile(problem_size, iterations, gemm_op, arguments, workspace); + } + else { + cudaError_t result; + status = gemm_op.initialize(arguments, workspace.get()); + status = gemm_op.run(); + result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync."; + return false; + } + } + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + bool passed = this->verify(problem_size, host_reference); + if (!passed) { + std::cout << "Error : Failed \n"; + } + + return passed; + } +}; + + +template +bool TestAllEVT(bool check_relative_equality=false) { + using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape; + + int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB); + std::vector problem_size_m = {max_alignment, 512 - 3 * max_alignment}; + std::vector problem_size_n = {max_alignment, 512 - 2 * max_alignment}; + + if constexpr (std::is_same_v) { + problem_size_m.push_back(768); + problem_size_n.push_back(768); + } + + constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages; + constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{}); + + std::vector problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment}; + + Testbed3xEVT 
testbed(check_relative_equality); + bool passed = true; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + ProblemShapeType problem_size; + if constexpr (cute::rank(ProblemShapeType{}) == 4) { + problem_size = ProblemShapeType{m, n, k, /* l */ 1}; + } + else { + problem_size = ProblemShapeType{m, n, k}; + } + + passed = testbed.run(problem_size); + + if (!passed) { + return false; + } + } + } + } + + // if we do support batched GEMM, just run one test on it to save on test time + if constexpr (cute::rank(ProblemShapeType{}) == 4) { + auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3}; + passed = testbed.run( + problem_size + ); + + if (!passed) { + return false; + } + } + + return passed; +} + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_testbed_3x_tensor_broadcast.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_testbed_3x_tensor_broadcast.hpp new file mode 100644 index 0000000000000000000000000000000000000000..145f8747254b84a12e3f4d03d55638b490877074 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_testbed_3x_tensor_broadcast.hpp @@ -0,0 +1,501 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with elementwise tensor-tensor broadcast epilogue +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "testbed_utils.h" +#include "gemm_testbed_3x.hpp" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct Testbed3xTensorBroadcast { + + using TestBedImpl = typename detail::TestbedImpl; + using Kernel = typename Gemm::GemmKernel; + using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue; + + using ElementA = typename Kernel::ElementA; + using StrideA = typename Kernel::StrideA; + using ElementB = typename Kernel::ElementB; + using StrideB = typename Kernel::StrideB; + using ElementC = typename Kernel::ElementC; + using StrideC = typename Kernel::StrideC; + using ElementD = typename Kernel::ElementD; + using StrideD = typename Kernel::StrideD; + + using ElementAccumulator = typename Kernel::ElementAccumulator; + using ElementCompute = typename Epilogue::ElementCompute; + using ElementScalar = typename Epilogue::ElementScalar; + using ProblemShapeType = typename Kernel::ProblemShape; + using ElementBias = typename Epilogue::ElementBias; + using ActivationFunctor = typename Epilogue::ActivationFunctor; + + static constexpr bool IsBinaryOp0Enabled = Epilogue::IsBinaryOp0Enabled; + static constexpr bool IsBinaryOp1Enabled = Epilogue::IsBinaryOp1Enabled; + static constexpr bool IsUnaryOpEnabled = Epilogue::IsUnaryOpEnabled; + + using LayoutTagA = typename TestBedImpl::LayoutTagA; + using LayoutTagB = typename TestBedImpl::LayoutTagB; + using LayoutTagC = typename TestBedImpl::LayoutTagC; + using LayoutTagD = typename TestBedImpl::LayoutTagD; + using LayoutTagVector = cutlass::layout::PackedVectorLayout; + + cutlass::HostTensor bias; + cutlass::HostTensor tensor_C1; + // tensor_C0 is taken from TestbedImpl's tensor_C + + + // Detail 
Implementation + TestBedImpl impl_; + + // + // Methods + // + Testbed3xTensorBroadcast( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = TestBedImpl::kDefaultSeed + ) : + impl_(init_A_, init_B_, init_C_, seed_) { } + + Testbed3xTensorBroadcast( + typename LayoutTagA::Stride stride_factor_A_, + typename LayoutTagB::Stride stride_factor_B_, + typename LayoutTagC::Stride stride_factor_C_, + typename LayoutTagD::Stride stride_factor_D_, + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = TestBedImpl::kDefaultSeed + ) : + impl_(stride_factor_A_, + stride_factor_B_, + stride_factor_C_, + stride_factor_D_, + init_A_, + init_B_, + init_C_, + seed_) { } + + /// Initializes data structures + void initialize(ProblemShapeType problem_size) { + // + // Allocate the GEMM workspace for A/B/C/D tensor + // + impl_.initialize(problem_size); + } + + void initialize_bias(ProblemShapeType problem_size) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + auto M = cute::get<0>(problem_shape_MNKL); + bias.resize(cutlass::Coord<1>(M)); + + EXPECT_TRUE(impl_.initialize_tensor(bias.host_view(), cutlass::Distribution::Uniform, impl_.seed + 2023)); + bias.sync_device(); + } + + void initialize_c1(ProblemShapeType problem_size) { + auto problem_shape_MNKL = cute::append<4>(problem_size, 1); + auto M = cute::get<0>(problem_shape_MNKL); + auto N = cute::get<1>(problem_shape_MNKL); + auto L = cute::get<3>(problem_shape_MNKL); + + auto c_coord = cutlass::make_Coord(M * L, N); + + tensor_C1.resize(c_coord, cutlass::layout::Affine2Layout_Factory::layout_factory(c_coord, impl_.stride_factor_C)); + 
EXPECT_TRUE(impl_.initialize_tensor(tensor_C1.host_view(), cutlass::Distribution::Uniform, impl_.seed + 2024)); + tensor_C1.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cute::Shape problem_shape_MNKL, + ElementScalar alpha, + ElementScalar beta, + bool use_bias) + { + auto [M, N, K, L] = problem_shape_MNKL; + auto coord_0 = cutlass::make_Coord(0); + + impl_.tensor_D.sync_host(); + EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.tensor_B.host_view()), 0); + + if (impl_.tensor_D.size() > 1) { + EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.tensor_D.host_view()), 0); + } + + if (impl_.reference_D.size() > 1) { + EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.reference_D.host_view()), 0); + } + + bool passed = cutlass::reference::host::TensorEquals(impl_.reference_D.host_view(), impl_.tensor_D.host_view()); + + EXPECT_TRUE(passed); + + if (!passed) { + std::stringstream fname; + fname << "error_Gemm_device_broadcast" + << M << "x" << N << "x" << K << "x" << L << "_" + << cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_" + << cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_" + << cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt"; + + std::ofstream file(fname.str()); + file + << "problem: " << ' ' << M << "x" << N << "x" << K << ", Batch count = " << L + << ", alpha: " << float(alpha) << ", beta: " << float(beta) << ", use_bias: " << use_bias << "\n\n"; + + if (use_bias){ + file << "Bias = \n" << bias.host_view()<< "\n\n"; + } + + file + << "A =\n" << impl_.tensor_A.host_view() + << "\nB =\n" << impl_.tensor_B.host_view() + << "\nC0 =\n" << impl_.tensor_C.host_view() + << "\nC1 =\n" << tensor_C1.host_view() + << "\n\nReference =\n" << impl_.reference_D.host_view() + << "\n\nComputed =\n" <(problem_size, 1); + auto M = 
cute::get<0>(problem_shape_MNKL); + auto N = cute::get<1>(problem_shape_MNKL); + auto K = cute::get<2>(problem_shape_MNKL); + auto L = cute::get<3>(problem_shape_MNKL); + auto coord_0 = cutlass::make_Coord(0); + + auto A = cute::make_tensor(impl_.tensor_A.host_data(), + cute::make_layout(cute::make_shape(M, K, L), impl_.stride_a)); + auto B = cute::make_tensor(impl_.tensor_B.host_data(), + cute::make_layout(cute::make_shape(N, K, L), impl_.stride_b)); + auto D = cute::make_tensor(impl_.reference_D.host_data(), + cute::make_layout(cute::make_shape(M, N, L), impl_.stride_d)); + auto Bias = cute::make_tensor(static_cast(use_bias ? bias.host_data() : nullptr), + cute::make_layout(cute::make_shape(M, 1))); + auto C0 = cute::make_tensor(impl_.tensor_C.host_data(), + cute::make_layout(cute::make_shape(M, N, L), impl_.stride_c)); + auto C1 = cute::make_tensor(tensor_C1.host_data(), + cute::make_layout(cute::make_shape(M, N, L), impl_.stride_c)); + + // Create host workspace for output of testbed. This computes a portion of the epilogue: + // ref_compute_out = Activation(alpha * (A @ B) + bias) + cutlass::HostTensor ref_compute_out; + auto c_coord = cutlass::make_Coord(M * L, N); + ref_compute_out.resize(c_coord, cutlass::layout::Affine2Layout_Factory::layout_factory(c_coord, impl_.stride_factor_C), false); + auto RefComputeOut = cute::make_tensor(ref_compute_out.host_data(), + cute::make_layout(cute::make_shape(M, N, L), impl_.stride_c)); + + cutlass::reference::host::GettMainloopParams mainloop_params{A, B}; + + // Use a dummy null tensor for operand C because the epilogue overrides C. 
+ auto dummy_C = cute::make_tensor(static_cast(nullptr), + cute::make_layout(cute::make_shape(M, N, L), impl_.stride_c)); + ElementCompute dummy_beta(0); + auto dummy_Aux = cute::make_tensor(static_cast(nullptr), + cute::make_layout(cute::make_shape(M, N, L), impl_.stride_d)); + auto dummy_Valpha = cute::make_tensor(static_cast(nullptr), + cute::make_layout(cute::make_shape(M, 1))); + auto dummy_Vbeta = cute::make_tensor(static_cast(nullptr), + cute::make_layout(cute::make_shape(M, 1))); + + cutlass::reference::host::GettEpilogueParams< + ElementScalar, + ElementScalar, + ElementAccumulator, + ElementCompute, + decltype(dummy_C), + decltype(RefComputeOut), + decltype(Bias), + decltype(dummy_Aux), + decltype(dummy_Valpha), + decltype(dummy_Vbeta), + ActivationFunctor> epilogue_params{ + alpha, + dummy_beta, + dummy_C, + RefComputeOut, + Bias, + dummy_Aux, + dummy_Valpha, + dummy_Vbeta + }; + + cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params); + + cutlass::NumericConverter source_converter; + cutlass::NumericConverter destination_converter; + cutlass::multiplies mul; + + // Compute broadcast operations atop the reference + #pragma omp parallel for collapse(3) + for (int64_t l = 0; l < cute::size<2>(A.layout()); ++l) { + for (int64_t m = 0; m < cute::size<0>(A.layout()); ++m) { + for (int64_t n = 0; n < cute::size<0>(B.layout()); ++n) { + ElementCompute intermediate = RefComputeOut(m, n, l); + // Apply BinaryOp0, if needed + if constexpr (IsBinaryOp0Enabled) { + typename Epilogue::ThreadEpilogueOp::BinaryOp0 bin0; + ElementCompute converted_source = source_converter(C0(m, n, l)); + intermediate = bin0(intermediate, mul(beta, converted_source)); + } + + // Apply BinaryOp1, if needed + if constexpr (IsBinaryOp1Enabled) { + typename Epilogue::ThreadEpilogueOp::BinaryOp1 bin1; + ElementCompute converted_source = source_converter(C1(m, n, l)); + intermediate = bin1(intermediate, mul(beta, converted_source)); + } + + // Apply UnaryOp, if needed + if 
constexpr (IsUnaryOpEnabled) { + typename Epilogue::ThreadEpilogueOp::UnaryOp unary; + intermediate = unary(intermediate); + } + + D(m, n, l) = destination_converter(intermediate); + } + } + } + + return compare_reference(problem_shape_MNKL, alpha, beta, use_bias); + } + + /// Executes one test + bool run( + ProblemShapeType problem_size, + ElementScalar alpha = ElementScalar(1), + ElementScalar beta = ElementScalar(0), + bool profiling = false, + int iterations = 20, + bool use_bias = true) + { + // Fail test if insufficient CUDA device + if (!impl_.sufficient()) { + std::cout << "Test failed due to insufficient CUDA device." << std::endl; + return false; + } + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments; + cutlass::KernelHardwareInfo hw_info; + hw_info.device_id = 0; + if (not profiling) { + impl_.sm_count = min(impl_.MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id)); + hw_info.sm_count = impl_.sm_count; + } + else { + impl_.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); + hw_info.sm_count = impl_.sm_count; + } + + /// Initializes data structures + /// A/B/C0/D Tensor + initialize(problem_size); + initialize_bias(problem_size); + + if constexpr (IsBinaryOp1Enabled) { + initialize_c1(problem_size); + } + + arguments = typename Gemm::Arguments{ + cutlass::gemm::GemmUniversalMode::kGemm, + problem_size, + { impl_.tensor_A.device_data(), impl_.stride_a, + impl_.tensor_B.device_data(), impl_.stride_b, + impl_.mma_promotion_interval + }, + { // Epilogue arguments + { alpha, beta }, // ThreadOp arguments + impl_.stride_c, + impl_.tensor_D.device_data(), + impl_.stride_d, + use_bias ? 
bias.device_data() : nullptr, + impl_.tensor_C.device_data(), + tensor_C1.device_data() + }, // Epilogue arguments end + hw_info + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.can_implement(arguments); + + if (status != cutlass::Status::kSuccess) { + cudaError_t error = cudaGetLastError(); + std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; + return true; + } + + // + // Run the GEMM + // + + if (profiling) { + return impl_.profile(problem_size, iterations, gemm_op, arguments, workspace); + } + else { + cudaError_t result; + status = gemm_op.initialize(arguments, workspace.get()); + status = gemm_op.run(); + result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync."; + return false; + } + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + bool passed = this->verify(problem_size, alpha, beta, use_bias); + if (!passed) { + std::cout << "Error : Failed : with alpha: " << float(alpha) + << ", beta: " << float(beta) + << ", use_bias: " << use_bias + << "\n"; + } + + return passed; + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllTensorBroadcast(bool use_bias=true) { + using ElementScalar = typename Gemm::GemmKernel::CollectiveEpilogue::ElementScalar; + using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape; + + int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB); + std::vector problem_size_m = {max_alignment, 512 - 3 * max_alignment}; + std::vector problem_size_n = {max_alignment, 512 - 2 * max_alignment}; + + if constexpr (std::is_same_v) { + problem_size_m.push_back(768); + problem_size_n.push_back(768); + } + + constexpr int Stages = 
Gemm::GemmKernel::DispatchPolicy::Stages; + constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{}); + + std::vector problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment}; + + Testbed3xTensorBroadcast testbed; + bool passed = true; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + ProblemShapeType problem_size; + if constexpr (cute::rank(ProblemShapeType{}) == 4) { + problem_size = ProblemShapeType{m, n, k, /* l */ 1}; + } + else { + problem_size = ProblemShapeType{m, n, k}; + } + + for (bool use_bias : {true, false}) { + passed = testbed.run( + problem_size, + cutlass::from_real(1), + cutlass::from_real(1), + false, // profiling + 20, // iterations + use_bias + ); + + if (!passed) { + return false; + } + } + } + } + } + + if constexpr (cute::rank(ProblemShapeType{}) == 4) { + auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3}; + passed = testbed.run( + problem_size, + cutlass::from_real(1), + cutlass::from_real(1), + false, // profiling + 20 // iterations + ); + if (!passed) { + return false; + } + } + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32n_tf32n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32n_tf32n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..1c33d504fb38128cd26f0185bbd1eefedc7cfb4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32n_tf32n_f32t_tensor_op_f32_sm80.cu @@ -0,0 
+1,555 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + 
cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 128x256x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 64x256x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 
256x64x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 64x128x16_32x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 128x64x16_64x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32t_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32n_tf32t_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32n_tf32t_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..375e7b9ac1dd8a1bf6b3ed1696ede8110eb07f23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32n_tf32t_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,555 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, 
+ ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 128x256x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 
64x256x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 256x64x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 64x128x16_32x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 128x64x16_64x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32t_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32t_tf32n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32t_tf32n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..335336818b5d894e40b0670627b73262fd0570a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32t_tf32n_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,493 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 128x256x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 256x64x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 64x128x16_32x64x16) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 128x64x16_64x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32n_f32t_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + 
cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32t_tf32t_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32t_tf32t_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..3b243b2d55dd755c2abe0469919ef4f5959c29aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_tf32t_tf32t_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,556 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + 
ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<64, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 128x256x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 64x256x16_64x64x16) { + 
+ using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 256x64x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 64x128x16_32x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 128x64x16_64x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32t_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + cutlass::tfloat32_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_u8t_u8n_s32t_wmma_tensor_op_s32_sm72.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_u8t_u8n_s32t_wmma_tensor_op_s32_sm72.cu new file mode 100644 index 0000000000000000000000000000000000000000..a39f29db22154e2ab42ef780f950662063beaa63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_u8t_u8n_s32t_wmma_tensor_op_s32_sm72.cu @@ -0,0 +1,185 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 16x16x16, DataType/Instruction = u8*u8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_u8t_u8n_s32t_wmma_tensor_op_s32, 128x128x32_64x64x32_16x16x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + uint8_t, + cutlass::layout::RowMajor, + uint8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM75_Device_Gemm_u8t_u8n_s32t_wmma_tensor_op_s32, 64x128x64_32x32x64_16x16x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + uint8_t, + cutlass::layout::RowMajor, + uint8_t, + cutlass::layout::ColumnMajor, + ElementOutput, 
+ cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 32x8x16, DataType/Instruction = u8*u8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_u8t_u8n_s32t_wmma_tensor_op_s32, 64x128x64_32x64x64_32x8x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + uint8_t, + cutlass::layout::RowMajor, + uint8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +//////////////// WMMA Size = 8x32x16, DataType/Instruction = u8*u8+s32=>s32 ////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_Device_Gemm_u8t_u8n_s32t_wmma_tensor_op_s32, 
64x128x64_32x64x64_8x32x16) { + + using ElementOutput = int32_t; + using ElementAccumulator = int32_t; + + using Gemm = cutlass::gemm::device::Gemm< + uint8_t, + cutlass::layout::RowMajor, + uint8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassWmmaTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 128, 64>, + cutlass::gemm::GemmShape<32, 64, 64>, + cutlass::gemm::GemmShape<32, 8, 16>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} +#endif //CUTLASS_ARCH_WMMA_SM72_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf32n_cf32n_cf32n_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf32n_cf32n_cf32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..96981a2d73b81358ab1c3eb69c6fbb97fbd01595 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf32n_cf32n_cf32n_tensor_op_f32_sm80.cu @@ -0,0 +1,199 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/gemm_universal.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_universal.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf32n_cf32t_cf32n_tensor_op_f32, 64x64x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf32n_cf32h_cf32n_tensor_op_f32, 64x64x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf32h_cf32t_cf32n_tensor_op_f32, 64x64x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf32h_cf32c_cf32n_tensor_op_f32, 64x64x16_32x32x16) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..29103d08f1e3bacb6105e3f81ad0591c894f5f0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu @@ -0,0 +1,200 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/gemm_universal.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_universal.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64n_cf64t_cf64n_tensor_op_f64_gaussian, 64x64x32_32x32x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64n_cf64h_cf64n_tensor_op_f64_gaussian, 64x64x32_32x32x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + 
Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64h_cf64t_cf64n_tensor_op_f64_gaussian, 64x32x32_32x16x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64h_cf64c_cf64n_tensor_op_f64_gaussian, 64x64x32_32x16x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + 
cutlass::gemm::GemmShape<32, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..b82c5e5389907bad013e35d511bc1960f690ea6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu @@ -0,0 +1,200 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/gemm_universal.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_universal.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64n_cf64t_cf64n_tensor_op_f64, 64x64x32_32x32x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64n_cf64h_cf64n_tensor_op_f64, 64x64x32_32x32x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + 
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64h_cf64t_cf64n_tensor_op_f64, 64x64x32_32x32x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmUniversal_cf64h_cf64c_cf64n_tensor_op_f64, 64x64x32_32x32x32) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmUniversal< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 3, + 1, + 1, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_f16n_f16t_f32t_tensor_op_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_f16n_f16t_f32t_tensor_op_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..771573bb46762dee2773f4dfaacfae5d8353fe38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_f16n_f16t_f32t_tensor_op_f32_sm75.cu @@ -0,0 +1,115 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/gemm_universal.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_universal.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmUniversal_f16n_f16t_f32t_tensor_op_f32, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 2>; + + EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal()); +} + +TEST(SM75_Device_GemmUniversal_f16n_f16t_f32t_tensor_op_f32, 64x64x32_32x32x32_updated_batch_count) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::GemmUniversal< + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + ElementOutput, cutlass::layout::RowMajor, + ElementAccumulator, cutlass::arch::OpClassTensorOp, 
cutlass::arch::Sm75, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, 128 / cutlass::sizeof_bits::value, + ElementAccumulator, ElementAccumulator>, + cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, + 2, + 1, + 1>; + + EXPECT_TRUE(test::gemm::device::TestGemmUniversal( + {128, 128, 2}, + cutlass::gemm::GemmUniversalMode::kGemm, + 15)); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..8a1884d8b4ac83c53ad7c186b5baaaf731893ff3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu @@ -0,0 +1,464 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" + +#include "cutlass/gemm/kernel/default_gemm_with_broadcast.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" +#include "cutlass/epilogue/thread/linear_combination_bias_relu.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_gemm_with_broadcast.h" + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes: +/// +/// Z = GEMM+Bias+ReLu +/// T = Relu conditional +/// +template +struct GemmWithBiasReluReferenceOp { + + using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; + + using ElementCompute = typename OutputOp::ElementCompute; + using ElementZ = typename OutputOp::ElementZ; + using ElementT = typename OutputOp::ElementT; + + typename OutputOp::BinaryOp binary_op; + typename OutputOp::ElementwiseOp elementwise_op; + + GemmWithBiasReluReferenceOp() { } + + void operator()(ElementZ &Z, ElementT &T, ElementCompute gemm, ElementCompute bias) { + + ElementCompute kThreshold = ElementCompute(); + + ElementCompute z_full = binary_op(gemm, bias); + + bool conditional = (z_full >= kThreshold); + + if (!conditional) { + z_full = kThreshold; + } + + Z = ElementZ(z_full); + T = ElementT(conditional); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise< + cutlass::half_t, + float, + float, + cutlass::half_t, + cutlass::half_t, + 8, + cutlass::epilogue::thread::GELU_taylor + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise< + cutlass::half_t, + float, + float, + cutlass::half_t, + cutlass::half_t, + 8, + cutlass::epilogue::thread::GELU_taylor + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast(); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu< + cutlass::half_t, + float, + float, + cutlass::half_t, + 8, + true + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast >(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu< + cutlass::half_t, + float, + float, + cutlass::half_t, + 8, + true + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, 
cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast >(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if defiend(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x5_64x64x32_16x8x16) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise< + cutlass::half_t, + float, + float, + cutlass::half_t, + cutlass::half_t, + 8, + cutlass::epilogue::thread::GELU_taylor + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 5, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast(); +} + +TEST(SM80_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x5_64x64x32_16x8x16) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu< + cutlass::half_t, + float, + float, + cutlass::half_t, + 8, + true + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 5, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast>(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x4_64x64x32_16x8x16) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise< + cutlass::half_t, + float, + float, + cutlass::half_t, + cutlass::half_t, + 8, + cutlass::epilogue::thread::GELU_taylor + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 4, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast(); +} + +TEST(SM80_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x4_64x64x32_16x8x16) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu< + cutlass::half_t, + float, + float, + cutlass::half_t, + 8, + true + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 4, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast>(); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x3_64x64x32_16x8x16) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise< + cutlass::half_t, + float, + float, + cutlass::half_t, + cutlass::half_t, + 8, + cutlass::epilogue::thread::GELU_taylor + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, 
cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast(); +} + +TEST(SM80_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x3_64x64x32_16x8x16) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu< + cutlass::half_t, + float, + float, + cutlass::half_t, + 8, + true + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithBroadcast< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::TestAllGemmWithBroadcast >(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..15eca4b0097e310864403b620e835d3a9a3c77cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu @@ -0,0 +1,384 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" + +#include "cutlass/gemm/kernel/default_gemm_with_reduction.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "cutlass/epilogue/thread/linear_combination_drelu.h" +#include "cutlass/epilogue/thread/linear_combination_dgelu.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_gemm_with_reduction.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct dReluLambda { + float operator()(float d_y, float t) { + if (t <= 0) { + d_y = 0; + } + return d_y; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmWithReduction_dReLU_bGrad_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + 
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< + float, + float, + cutlass::half_t, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::GemmWithReductionReference< + Gemm, + dReluLambda + >; + + test::gemm::device::TestGemmWithReduction( + {520, 264, 96}, + cutlass::gemm::GemmUniversalMode::kGemm, + 2, + float(1.25), + float(2.25) + ); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmWithReduction_dReLU_bGrad_f16n_f16n_f16n_tensor_op_f32, 256x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< + float, + float, + cutlass::half_t, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::GemmWithReductionReference< + Gemm, + dReluLambda + >; + + test::gemm::device::TestGemmWithReduction( + {520, 264, 96}, + cutlass::gemm::GemmUniversalMode::kGemm, + 1, + float(1.25), + float(2.25) + ); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmWithReduction_dReLU_bGrad_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< + float, + float, + cutlass::half_t, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::GemmWithReductionReference< + Gemm, + dReluLambda + >; + + test::gemm::device::TestGemmWithReduction( + {520, 264, 96}, + cutlass::gemm::GemmUniversalMode::kGemm, + 2, + float(1.25), + float(2.25) + ); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM70_Device_GemmWithReduction_dReLU_bGrad_f16n_f16n_f16n_tensor_op_f32, 256x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< + float, + float, + cutlass::half_t, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::GemmWithReductionReference< + Gemm, + dReluLambda + >; + + test::gemm::device::TestGemmWithReduction( + {520, 264, 96}, + cutlass::gemm::GemmUniversalMode::kGemm, + 1, + float(1.25), + float(2.25) + ); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace device { + +template +struct Gemm_dReLU_packed_bits_reference_op { + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::ElementCompute; + using ElementC = typename Gemm::ElementC; + using ElementT = typename Gemm::GemmKernel::Epilogue::ElementTensor; + + // + // Methods + // + + Gemm_dReLU_packed_bits_reference_op() { } + + ElementCompute operator()( + ElementAccumulator d_y, + ElementT t) const { + + ElementCompute result = ElementCompute(d_y); + + bool cond = bool(t); + if (!cond) { + result = ElementCompute(); + } + + return 
result; + } +}; + +} // namespace device +} // namespace gemm +} // namespace test + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_Device_GemmWithReduction_dReLU_conditional_bits_bGrad_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDReluConditionalBits< + float, + float, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm75, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::Gemm_dReLU_packed_bits_reference_op; + + test::gemm::device::TestGemmWithReduction( + {520, 264, 96}, + cutlass::gemm::GemmUniversalMode::kGemm, + 2, + float(1.25), + float(2.25) + ); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_Device_GemmWithReduction_dReLU_conditional_bits_bGrad_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDReluConditionalBits< + float, + float, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, 
cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm70, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<8, 8, 4>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 2, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::Gemm_dReLU_packed_bits_reference_op; + + test::gemm::device::TestGemmWithReduction( + {520, 264, 96}, + cutlass::gemm::GemmUniversalMode::kGemm, + 2, + float(1.25), + float(2.25) + ); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if defiend(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_reduction_f16t_f16n_f16n_tensorop_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_reduction_f16t_f16n_f16n_tensorop_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..3e04929ac27a12cfc95bc12d50140cddb3409991 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemm_with_reduction_f16t_f16n_f16n_tensorop_f32_sm80.cu @@ -0,0 +1,118 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" + +#include "cutlass/gemm/kernel/default_gemm_with_reduction.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +#include "cutlass/epilogue/thread/linear_combination_drelu.h" +#include "cutlass/epilogue/thread/linear_combination_dgelu.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_gemm_with_reduction.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct dReluLambda { + float operator()(float d_y, float t) { + if (t <= 0) { + d_y = 0; + } + return d_y; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_GemmWithReduction_dReLU_bGrad_f16t_f16n_f16n_tensor_op_f32, 128x128x32_64x64x32) { + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< + float, + float, + cutlass::half_t, + cutlass::half_t, + 8 + >; + + using GemmKernel = + typename cutlass::gemm::kernel::DefaultGemmWithReduction< + cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand + cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand + cutlass::half_t, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<16, 8, 16>, + EpilogueOutputOp, + cutlass::plus, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 5, + cutlass::arch::OpMultiplyAdd + >::GemmKernel; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + using ReferenceOp = test::gemm::device::GemmWithReductionReference< + Gemm, + dReluLambda + >; + + test::gemm::device::TestGemmWithReduction( + {136, 6920, 512}, + cutlass::gemm::GemmUniversalMode::kGemm + ); +} + +#endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemv.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemv.cu new file mode 100644 index 0000000000000000000000000000000000000000..253b72d7b7a1b92b1a3d8a9213bfeebb6768bbf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/gemv.cu @@ -0,0 +1,586 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Tests for device-wide GEMV interface +*/ + +#include +#include +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/kernel/gemv.h" +#include "cutlass/gemm/device/gemv.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/gemm_complex.h" + +#include "testbed_utils.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { + +template +class TestbedGemv { +public: + + using ElementA = typename Gemv::ElementA; + using LayoutA = typename Gemv::LayoutA; + using ElementB = typename Gemv::ElementB; + using ElementC = typename Gemv::ElementC; + + using ElementAccumulator = typename Gemv::ElementAccumulator; + using ElementCompute = typename Gemv::EpilogueOutputOp::ElementCompute; + + using LayoutV = cutlass::layout::RowMajor; + +private: + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + +public: + + // + // Methods + // + + TestbedGemv( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2023 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor 
view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize( + cutlass::MatrixCoord problem_size, + int32_t batch_count + ) { + + // + // Allocate the GEMV workspace + // + + if(std::is_same::value) { + tensor_A.resize({problem_size.row(), batch_count * problem_size.column()}); + } + else { + tensor_A.resize({batch_count * problem_size.row(), problem_size.column()}); + } + + tensor_B.resize({batch_count * problem_size.column(), 1}); + tensor_C.resize({batch_count * problem_size.row(), 1}); + tensor_D.resize({batch_count * problem_size.row(), 1}); + reference_D.resize({batch_count * problem_size.row(), 1}, false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 1)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 3)); 
+ + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. + tensor_A.host_view().at({0, 0}) = typename Gemv::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Gemv::ElementB(1); + tensor_C.host_view().at({0, 0}) = typename Gemv::ElementC(1); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::MatrixCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view()); + + EXPECT_TRUE(passed) << " mismatched reference"; + + if (!passed) { + + std::ofstream file("testbed_universal_errors.txt"); + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\n\nReference =\n" << reference_D.host_view() + << "\nComputed =\n" << tensor_D.host_view(); + } + + return passed; + } + + /// Verifies the result + bool verify( + cutlass::MatrixCoord problem_size, + int32_t batch_count, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + ElementCompute alpha, + ElementCompute beta) { + + // + // 
Verify + // + + cutlass::reference::host::GemmComplex< + typename Gemv::ElementA, typename Gemv::LayoutA, + typename Gemv::ElementB, LayoutV, + typename Gemv::ElementC, LayoutV, + ElementCompute, ElementAccumulator + >( + {problem_size.row(), 1, problem_size.column()}, + alpha, + tensor_A.host_ref(), + Gemv::kTransformA, + tensor_B.host_ref(), + Gemv::kTransformB, + beta, + tensor_C.host_ref(), + reference_D.host_ref(), + ElementAccumulator(0), + batch_count, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D + ); + + return compare_reference(problem_size, alpha, beta); + } + + /// Runs one problem size + bool run( + cutlass::MatrixCoord problem_size, + int32_t batch_count, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + ElementCompute alpha, + ElementCompute beta) { + + this->initialize(problem_size, batch_count); + + // + // Initialize the GEMV operator + // + + typename Gemv::Arguments arguments{ + problem_size, + batch_count, + {alpha, beta}, + tensor_A.device_ref(), + tensor_B.device_data(), + tensor_C.device_data(), + tensor_D.device_data(), + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D + }; + + Gemv gemm_op; + + cutlass::Status status = gemm_op.can_implement(arguments); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + size_t workspace_size = Gemv::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + status = gemm_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the GEMV + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify( + problem_size, + batch_count, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D, + alpha, + beta); + return passed; + } +}; + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllGemv() { + + using ElementCompute = typename Gemv::EpilogueOutputOp::ElementCompute; + + int Batch[] = { + 1, 520, 1314 + }; + + int M[] = { + 1, 5, 16 + }; + + int K[] = { + 8, 128, 256 + }; + + double Alpha[] = { + 1, 1.25 + }; + + double Beta[] = { + 0, 1, 1.25 + }; + + for (int b : Batch) { + for (int m : M) { + for (int k : K) { + for (double alpha : Alpha) { + for (double beta : Beta) { + + TestbedGemv testbed; + + if (!testbed.run( + {m, k}, + b, + m * k, + k, + m, + m, + ElementCompute(alpha), + ElementCompute(beta))) { + return false; + } + } + } + } + } + } + + return true; +} + +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemv_f16n_f16_f16_simt_f32, RowMajorA) { + + using ElementInput = cutlass::half_t; + using ElementOutput = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementAccumulator = float; + int const kElementsPerAccess = 8; + + using EpilogueOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator>; + + using Gemv = cutlass::gemm::device::Gemv< + cutlass::gemm::kernel::Gemv< + ElementInput, // Element A + LayoutA, // Layout A + ElementInput, // Element B + ElementOutput, // Element C + ElementAccumulator, // Element accumulator + EpilogueOp, // Output operator + kElementsPerAccess // Element access granularity + > + >; + + EXPECT_TRUE(test::gemm::TestAllGemv()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemv_f32n_f32_f32_simt_f32, RowMajorA) { + + using ElementInput = float; + using ElementOutput = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementAccumulator = float; + int const kElementsPerAccess = 4; + + using EpilogueOp = 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator>; + + using Gemv = cutlass::gemm::device::Gemv< + cutlass::gemm::kernel::Gemv< + ElementInput, // Element A + LayoutA, // Layout A + ElementInput, // Element B + ElementOutput, // Element C + ElementAccumulator, // Element accumulator + EpilogueOp, // Output operator + kElementsPerAccess // Element access granularity + > + >; + + EXPECT_TRUE(test::gemm::TestAllGemv()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemv_f64n_f64_f64_simt_f64, RowMajorA) { + + using ElementInput = double; + using ElementOutput = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementAccumulator = double; + int const kElementsPerAccess = 2; + + using EpilogueOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator>; + + using Gemv = cutlass::gemm::device::Gemv< + cutlass::gemm::kernel::Gemv< + ElementInput, // Element A + LayoutA, // Layout A + ElementInput, // Element B + ElementOutput, // Element C + ElementAccumulator, // Element accumulator + EpilogueOp, // Output operator + kElementsPerAccess // Element access granularity + > + >; + + EXPECT_TRUE(test::gemm::TestAllGemv()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemv_f16n_f16_f16_simt_f32, ColumnMajorA) { + + using ElementInput = cutlass::half_t; + using ElementOutput = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using EpilogueOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator>; + + using Gemv = cutlass::gemm::device::Gemv< + cutlass::gemm::kernel::Gemv< + ElementInput, // Element A + LayoutA, // Layout A + ElementInput, // Element B + ElementOutput, // Element C + 
ElementAccumulator, // Element Accumulator + EpilogueOp // Output operator + > + >; + + + EXPECT_TRUE(test::gemm::TestAllGemv()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemv_f32n_f32_f32_simt_f32, ColumnMajorA) { + + using ElementInput = float; + using ElementOutput = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using EpilogueOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator>; + + using Gemv = cutlass::gemm::device::Gemv< + cutlass::gemm::kernel::Gemv< + ElementInput, // Element A + LayoutA, // Layout A + ElementInput, // Element B + ElementOutput, // Element C + ElementAccumulator, // Element Accumulator + EpilogueOp // Output operator + > + >; + + + EXPECT_TRUE(test::gemm::TestAllGemv()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemv_f64n_f64_f64_simt_f64, ColumnMajorA) { + + using ElementInput = double; + using ElementOutput = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using EpilogueOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator>; + + using Gemv = cutlass::gemm::device::Gemv< + cutlass::gemm::kernel::Gemv< + ElementInput, // Element A + LayoutA, // Layout A + ElementInput, // Element B + ElementOutput, // Element C + ElementAccumulator, // Element Accumulator + EpilogueOp // Output operator + > + >; + + + EXPECT_TRUE(test::gemm::TestAllGemv()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_f32_ls_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..e09bf17591b1a8a141c57d05dfce188752f03516 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_f32_ls_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_ls_l_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_ls_u_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_ls_u_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..45fbebd34d3866dd7bea128c3a3c81b32d722c0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_f32_rs_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_rs_l_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_rs_u_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + 
cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_rs_u_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..def5edd42161e4ee91ce85552fb90e101c56919f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_ls_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_ls_l_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< 
+ ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_ls_u_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_ls_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..ebf905506d17861343c5b13ce5053602e212f319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_rs_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_rs_l_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_rs_u_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + 
cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf32h_cf32n_rs_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..99b446af6423b25528c8c2309670ebe527b35b88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,134 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Hemm_cf64h_cf64n_ls_l_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Hemm_cf64h_cf64n_rs_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..882bbf2ff44164de225354e296fe94d2f4082226 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_ls_l_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_ls_u_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = 
cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_ls_u_tensor_op_f64_gaussian, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4b4b166dd6bc7bd3e83e4226fb960e43d6b44a33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_ls_f64_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_ls_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_ls_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_ls_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 
64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_rs_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_rs_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..d6d1690e5fa85a2f695630aeab43e6de30c44c90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/hemm_cf64h_cf64n_cf64n_tensor_op_rs_f64_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide HEMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_rs_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + 
cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Hemm_cf64h_cf64n_rs_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Hemm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..1764e3273f976670d4467b725ab1afdbc5cac2cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_f32_sm80.cu @@ -0,0 +1,149 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HER2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2k_cf32n_cf32n_l_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + 
cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2k_cf32h_cf32n_l_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if 
defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..926ed0a98b3b3810f113945034e16925a2bd1ce5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,149 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HER2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2k_cf32n_cf32n_l_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + 
cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2k_cf32h_cf32n_l_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif 
// #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64_cf64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..ec0e03fa8408e5d518e2112fbfd3e118e7fe9c0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,148 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HER2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Her2k_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + 
cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Her2k_cf64c_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if 
defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64h_cf64n_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64h_cf64n_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..46975980dabe3a17c5ffab51bc31ce76c416f370 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64h_cf64n_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,310 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +// NOTE: HER2K requires that LayoutA == LayoutB, and that LayoutC == ColumnMajor + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = 
cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 
cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_l_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_l_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + 
using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + 
cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_u_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64h_cf64n_u_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = 
cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kConjugate, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kConjugate, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..c7dca8cd23a3e4670bceec4aaf7c981fdbe1c920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,310 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +// NOTE: HER2K requires that LayoutA == LayoutB, and that LayoutC == ColumnMajor + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 
cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_l_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + 
using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_l_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + 
cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_u_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = 
cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2KGrouped_cf64n_cf64n_u_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kHermitian>::Rank2Kkernel; + + using Rank2K = 
cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64n_cf64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64n_cf64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..3a659310809182354d466993756ad8452a303070 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/her2k_cf64n_cf64t_tensor_op_f64_sm80.cu @@ -0,0 +1,201 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HER2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +#if 0 // HER2K with RowMajor output is not supported +TEST(SM80_Device_Her2k_cf64n_cf64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = 
cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + false, // IsBetaZero + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2k_cf64c_cf64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + false, // 
IsBetaZero + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Her2k_cf64h_cf64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + false, // IsBetaZero + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KHermitianUniversal()); +} +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf32h_cf32n_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf32h_cf32n_tensor_op_fast_f32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..56ef601f53a3d1456b60bdbb452284fa04ba5080 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf32h_cf32n_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,219 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HERK interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_N (column-major) input layouts +TEST(SM80_Device_Herk_cf32n_cf32n_l_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+// HERK operator on CUBLAS_OP_N (column-major) input layouts +TEST(SM80_Device_Herk_cf32n_cf32n_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_C (row-major + conj) input layouts +TEST(SM80_Device_Herk_cf32h_cf32n_l_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_C (row-major + conj) input layouts +TEST(SM80_Device_Herk_cf32h_cf32n_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf64_cf64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 
0000000000000000000000000000000000000000..c853ed4cf801d5f16efa6e2c635b42a276102c16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,92 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HERK interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_C (row-major + conj) input layouts +TEST(SM90_Device_Herk_cf64h_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // 
#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf64h_cf64n_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf64h_cf64n_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..71d8a9c86347a41b066dd473eaa5acbf44be5128 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/herk_cf64h_cf64n_tensor_op_f64_sm80.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide HERK interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_N (column-major) input layouts +TEST(SM80_Device_Herk_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 
4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_N (column-major) input layouts +TEST(SM80_Device_Herk_cf64n_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// HERK operator on CUBLAS_OP_C (row-major + conj) input layouts +TEST(SM80_Device_Herk_cf64h_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + + using ElementC = cutlass::complex; + using LayoutC = 
cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate, + cutlass::BlasMode::kHermitian + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/multistage_testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/multistage_testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..f0f71bb8f6c29c7bfa6e7fd427c80e25460a7866 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/multistage_testbed.h @@ -0,0 +1,300 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +//////////////////////////////////////////////////////////////////////////////// + +template +struct MultistageTestbed { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = + typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + // + // Methods + // + + MultistageTestbed( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080) + : init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {} + + /// Helper to initialize a tensor view + template + bool initialize_tensor(cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, uint64_t seed) { + if (dist_kind == cutlass::Distribution::Uniform) { + int scope = (cutlass::sizeof_bits::value == 8) ? 
2 : 8; + cutlass::reference::host::TensorFillRandomUniform(view, seed, scope, + -scope, 0); + } else if (dist_kind == cutlass::Distribution::Gaussian) { + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1); + } else if (dist_kind == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(view); + } else if (dist_kind == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(view.data(), + view.capacity()); + } else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Waives test if CUDA device is insufficient + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run(cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + // Waives test if CUDA device is insufficient + if (!sufficient()) { + return true; + } + + // + // Allocate the GEMM workspace + // + + cutlass::HostTensor + tensor_A(problem_size.mk()); + + cutlass::HostTensor + tensor_B(problem_size.kn()); + + cutlass::HostTensor + tensor_C(problem_size.mn()); + + cutlass::HostTensor + tensor_D(problem_size.mn()); + + cutlass::HostTensor + reference_D(problem_size.mn(), false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, 
seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), + tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + problem_size, tensor_A.device_ref(), tensor_B.device_ref(), + tensor_C.device_ref(), tensor_D.device_ref(), {alpha, beta}}; + + Gemm gemm_op; + + cutlass::Status status = gemm_op.initialize(arguments); + + if (status != cutlass::Status::kSuccess) { + cudaError_t error = cudaGetLastError(); + std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; + return true; + } + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess); + + // + // Verify + // + + cutlass::reference::host::Gemm< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute, + ElementAccumulator, typename Gemm::Operator> + reference_gemm; + + reference_gemm( + problem_size, alpha, tensor_A.host_ref(), tensor_B.host_ref(), beta, + reference_D.host_ref(), ElementAccumulator(0)); + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals( + reference_D.host_view(), tensor_D.host_view()); + + EXPECT_TRUE(passed); + if (!passed) { + std::stringstream fname; + + fname << "error_Gemm_device_" << problem_size.m() << "x" + << problem_size.n() << "x" << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" << Gemm::ThreadblockShape::kN + << "x" << Gemm::ThreadblockShape::kK << "_" << Gemm::WarpShape::kM + << "x" << Gemm::WarpShape::kN << "x" << Gemm::WarpShape::kK + << 
".txt"; + + std::ofstream file(fname.str()); + + file << "problem: " << problem_size << ", alpha: " << alpha + << ", beta: " << beta << "\n\n"; + + file << "A =\n" + << tensor_A.host_view() << "\nB =\n" + << tensor_B.host_view() << "\nC =\n" + << tensor_C.host_view() << "\n\nReference =\n" + << reference_D.host_view() << "\nComputed =\n" + << tensor_D.host_view(); + } + + return passed; + } + + /// Runs a set of problem sizes + bool run_all() { + bool passed = true; + + int problem_size_m[] = {16, 528}; + + int problem_size_n[] = {16, 528}; + + int problem_size_k[] = {Gemm::InstructionShape::kK, + Gemm::ThreadblockShape::kK * Gemm::kStages + + Gemm::InstructionShape::kK}; + + double problem_alpha[] = {1.0}; + + // TODO Try non zero beta value after multistaged epilogue is implemented + double problem_beta[] = {0.0}; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (double alpha : problem_alpha) { + for (double beta : problem_beta) { + passed = + run({m, n, k}, ElementCompute(alpha), ElementCompute(beta)); + + if (!passed) { + return false; + } + } + } + } + } + } + + return true; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/multistage_testbed_interleaved.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/multistage_testbed_interleaved.h new file mode 100644 index 0000000000000000000000000000000000000000..2556c0174ba78a1f44541ec9e1534f0624aef444 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/multistage_testbed_interleaved.h @@ -0,0 +1,348 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/host_reorder.h" + +namespace test { +namespace gemm { +namespace device { + +//////////////////////////////////////////////////////////////////////////////// + +template +struct MultistageInterleavedTestbed { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + // + // Methods + // + + MultistageInterleavedTestbed( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, 2, -2, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + 
cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerMultiprocessor < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + + // + // Allocate the GEMM workspace + // + + cutlass::HostTensor< + typename Gemm::ElementA, + typename Gemm::LayoutA> tensor_A(problem_size.mk()); + + cutlass::HostTensor< + typename Gemm::ElementB, + typename Gemm::LayoutB> tensor_B(problem_size.kn()); + + cutlass::HostTensor< + typename Gemm::ElementB, + typename Gemm::LayoutB> tensor_B_reordered(problem_size.kn()); + + cutlass::HostTensor< + typename Gemm::ElementC, + typename Gemm::LayoutC> tensor_C(problem_size.mn()); + + cutlass::HostTensor< + typename Gemm::ElementC, + typename Gemm::LayoutC> tensor_D(problem_size.mn()); + + cutlass::HostTensor< + typename Gemm::ElementC, + typename Gemm::LayoutC> reference_D(problem_size.mn(), false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + + cutlass::reorder_column( + tensor_B_reordered.host_ref(), tensor_B.host_ref(), problem_size); + + cutlass::reference::host::TensorCopy( + reference_D.host_view(), + tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_B_reordered.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + problem_size, + tensor_A.device_ref(), + tensor_B_reordered.device_ref(), + tensor_C.device_ref(), + tensor_D.device_ref(), + {alpha, beta} + }; + + Gemm gemm_op; + + cutlass::Status status = gemm_op.initialize(arguments); + + EXPECT_TRUE(status == cutlass::Status::kSuccess); + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess); + + // + // Verify + // + + cutlass::reference::host::Gemm< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute, + 
ElementAccumulator, typename Gemm::Operator> + reference_gemm; + + reference_gemm( + problem_size, + alpha, + tensor_A.host_ref(), + tensor_B.host_ref(), + beta, + reference_D.host_ref(), + ElementAccumulator(0) + ); + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals( + reference_D.host_view(), + tensor_D.host_view()); + + EXPECT_TRUE(passed); + if (!passed) { + + std::stringstream fname; + + fname << "error_Gemm_device_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" + << Gemm::ThreadblockShape::kN << "x" + << Gemm::ThreadblockShape::kK << "_" + << Gemm::WarpShape::kM << "x" + << Gemm::WarpShape::kN << "x" + << Gemm::WarpShape::kK << ".txt"; + + std::ofstream file(fname.str()); + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nB_reordered =\n" << tensor_B_reordered.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\n\nReference =\n" << reference_D.host_view() + << "\nComputed =\n" << tensor_D.host_view(); + } + + return passed; + } + + /// Runs a set of problem sizes + bool run_all() { + bool passed = true; + + int problem_size_m[] = { + InterleavedK, 512 + InterleavedK + }; + + int problem_size_n[] = { + InterleavedK, 512 + InterleavedK + }; + + int problem_size_k[] = { + InterleavedK, Gemm::ThreadblockShape::kK * Gemm::kStages + InterleavedK + }; + + double problem_alpha[] = { + 1.0 + }; + + double problem_beta[] = { + 0.0 + }; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (double alpha : problem_alpha) { + for (double beta : problem_beta) { + + passed = run( + {m, n, k}, + 
ElementCompute(alpha), + ElementCompute(beta) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + + return true; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..51632bb66d5d01e11a1ac6ab1fbd4f39a95b7b83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nn_sm50.cu @@ -0,0 +1,1131 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::complex; + 
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// 
Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_nn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 
64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_nn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / 
Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..512fcbcc58868ffbb5d8f437f99449f1f16703fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nt_sm50.cu @@ -0,0 +1,1311 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / 
Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape 
= cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_nt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 16x32x8_8x16x1_2x2_4x8_2x2, { + 
using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, 
+ cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 
16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// 
Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // 
Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_nt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements 
/ Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, 
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 
64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_nt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); 
+} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_nt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nt_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nt_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..805937a3b2d161530beaddc946fbf463b87f2070 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_nt_sm80.cu @@ -0,0 +1,265 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 32x64x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + 
+TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 64x64x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 128x128x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 64x128x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + 
Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 128x64x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 8>, + cutlass::gemm::GemmShape<64, 32, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 128x128x8_64x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32n_cf32t_cf32t_simt_cf32, 128x256x8_64x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 256, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..74058029cbdd51c930254e112d34161470fe0bfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tn_sm50.cu @@ -0,0 +1,1131 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 
32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) 
+ +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_tn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, 
+ cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 
64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 
64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; 
+ EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_tn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements 
/ Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tn, 
128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tn_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tn_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..cfb3764398ada6805297e313659a58088d994417 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tn_sm80.cu @@ -0,0 +1,269 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm_complex.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_complex.h" + + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 32x64x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<32, 32, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 64x64x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 
cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 128x128x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 64x128x8_32x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 128x64x8_64x32x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 
8>, + cutlass::gemm::GemmShape<64, 32, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 128x128x8_64x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +TEST(SM80_Device_Gemm_cf32t_cf32n_cf32t_simt_cf32, 128x256x8_64x64x1) { + + using Element = cutlass::complex; + + using Gemm = cutlass::gemm::device::GemmComplex< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmComplex()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if 
defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..3c232f12c597850cc7114d3bf426a53f94c6594d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_cgemm_tt_sm50.cu @@ -0,0 +1,1130 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 
x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_tt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using 
precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; 
+ + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 
+CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_cgemm_tt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / 
Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_cgemm_tt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_cgemm_tt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_nn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_nn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..f65fd01775270c7d5fa9cc87c3e55d6b406338f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_nn_sm50.cu @@ -0,0 +1,991 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; 
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 
8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_nn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + 
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = double; + using 
ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; 
+ + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 
+CUTLASS_TEST_L0(SM50_device_dgemm_nn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_affin2_nn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using 
precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_nn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, 
precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_nn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..9205761b8f39138a31fdff3411bdca0e5a935eed --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_tn_sm50.cu @@ -0,0 +1,991 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// 
Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_tn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // 
Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = 
cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / 
Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_tn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_affine2_tn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2RowMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index 
stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 
2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_tt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_tt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..b6359784f0928e2e263ab095352a1daf71b0b2fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_dgemm_tt_sm50.cu @@ -0,0 +1,991 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / 
Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_tt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_affine2_tt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, 
WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 16x64x8_16x32x1_4x4_4x8_1x2, { + 
using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + 
ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x64x8_16x32x1_4x4_4x8_2x2, { + 
using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_dgemm_tt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + 
ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + 
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_dgemm_tt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 32x32x8_8x16x1_2x2_4x8_4x2, 
{ + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, 
+ ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_dgemm_tt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = double; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_f8gemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_f8gemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..a10a604db51433d026776e16fe07b768106de000 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_f8gemm_tn_sm50.cu @@ -0,0 +1,87 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 
NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// + +#if (__CUDACC_VER_MAJOR__ > 11) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) + +TEST(SM50_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_simt_f32, 32x64x8_32x64x1) { + + using ElementA = cutlass::float_e4m3_t; + using ElementB = cutlass::float_e4m3_t; + using ElementC = cutlass::float_e4m3_t; + using ElementAccumulator = float; + + using Gemm = cutlass::gemm::device::Gemm< + ElementA, + cutlass::layout::RowMajor, + ElementB, + cutlass::layout::ColumnMajor, + ElementC, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementC>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<> + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} + +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_nn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_nn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..b39930316197022ba7090877521095862e3bf880 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_nn_sm50.cu @@ -0,0 +1,2181 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 32x128x8_32x128x1_8x16_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_nn, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< 
+ precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 64x64x8_64x64x1_16x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 128x32x8_128x32x1_16x8_8x4_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; 
+ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 
8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) 
+ +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 32x256x8_32x128x1_8x16_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using 
WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 
4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_nn, 64x128x8_64x64x1_16x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 128x64x8_64x64x1_16x8_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 256x32x8_128x32x1_16x8_8x4_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 
16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) 
+ +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using 
WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 64x256x8_32x128x1_8x16_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / 
Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 128x128x8_64x64x1_16x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 256x64x8_128x32x1_16x8_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + 
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 
32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 128x256x8_64x64x1_16x8_4x8_2x4, { + using precision = cutlass::half_t; + using 
ThreadblockShape = cutlass::gemm::GemmShape<128, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads 
/ Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_nn, 256x128x8_64x64x1_16x8_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 
128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 128x256x8_32x64x1_8x8_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + 
precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nn, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nn, 256x128x8_64x32x1_8x8_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp 
= cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_nt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_nt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..d414a7bc404eacc6d6447367d596cea01fa5b164 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_nt_sm50.cu @@ -0,0 +1,2181 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::half_t; + using 
ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 32x128x8_32x128x1_8x16_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_nt, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 64x64x8_64x64x1_16x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 128x32x8_128x32x1_16x8_8x4_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 
x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 
// Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 32x256x8_32x128x1_8x16_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape 
= cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_nt, 64x128x8_64x64x1_16x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 128x64x8_64x64x1_16x8_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 256x32x8_128x32x1_16x8_8x4_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + 
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 
16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 64x256x8_32x128x1_8x16_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape 
= cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// 
Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 128x128x8_64x64x1_16x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 256x64x8_128x32x1_16x8_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape 
= cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x128x8_16x32x1_4x4_4x8_2x4, { + 
using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 128x256x8_64x64x1_16x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using 
WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// 
Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_nt, 256x128x8_64x64x1_16x8_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 
32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); 
+} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using 
WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 
x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 128x256x8_32x64x1_8x8_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_nt, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_nt, 256x128x8_64x32x1_8x8_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..2891c9782f9fa9b6374f58b1bf3c2e38c511bd3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_hgemm_tn_sm50.cu @@ -0,0 +1,2181 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements 
/ Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 32x128x8_32x128x1_8x16_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_tn, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 64x64x8_64x64x1_16x8_4x8_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 128x32x8_128x32x1_16x8_8x4_1x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 
8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 32x256x8_32x128x1_8x16_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape 
= cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps 
/ Block: 1 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_tn, 64x128x8_64x64x1_16x8_4x8_1x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 128x64x8_64x64x1_16x8_4x8_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 256x32x8_128x32x1_16x8_8x4_2x1, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 
16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 
8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 16 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 64x256x8_32x128x1_8x16_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 128, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// 
Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 
// Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 128x128x8_64x64x1_16x8_4x8_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 256x64x8_128x32x1_16x8_8x4_2x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<128, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 
1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::half_t; + 
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 128x256x8_64x64x1_16x8_4x8_2x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 256, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 
8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 16 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_hgemm_tn, 
256x128x8_64x64x1_16x8_4x8_4x2, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); 
+} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / 
Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 128x256x8_32x64x1_8x8_4x8_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_hgemm_tn, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_hgemm_tn, 256x128x8_64x32x1_8x8_8x4_4x4, { + using precision = cutlass::half_t; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_nt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_nt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..64391a40fb55adef79a1b1271a3d32709b116a07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_nt_sm50.cu @@ -0,0 +1,1761 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_igemm_nt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 
64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 
8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 
// Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 
16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 
+CUTLASS_TEST_L1(SM50_device_igemm_nt, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
+ cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 
8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 
+CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
+ cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_igemm_nt, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = 
cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 
x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// 
Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_nt, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = 
cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 
x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_nt, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..9e6c8419cfe4a187a029f76086226522d9c819b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_tn_sm50.cu @@ -0,0 +1,1671 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_igemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using 
WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / 
Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; 
+ EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int 
const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 
+CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, 
+ cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 
16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_igemm_tn, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 
+// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// 
Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tn, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = 
cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 
+// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tn, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_tt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_tt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..87c797636deba05c90f6bceafa4a10f70cfc883a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_igemm_tt_sm50.cu @@ -0,0 +1,1731 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_igemm_tt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 
8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// 
Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + 
>; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 
+CUTLASS_TEST_L1(SM50_device_igemm_tt, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 
+CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_igemm_tt, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 
16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); 
+} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + 
using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 
64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + 
ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_igemm_tt, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x32x16_16x8x1_2x2_8x4_4x4, { 
+ using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + 
ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_igemm_tt, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = int; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_int8_igemm_sm61.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_int8_igemm_sm61.cu new file mode 100644 index 0000000000000000000000000000000000000000..22729f492a293b1cdf10ce6a71c28dc10ee206f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_int8_igemm_sm61.cu @@ -0,0 
+1,161 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#define N cutlass::layout::ColumnMajor +#define T cutlass::layout::RowMajor + +#define RUN_GEMM(X, Y) \ + using ElementOutput = int8_t; \ + using ElementAccumulator = int32_t; \ + using ElementCompute = float; \ + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; \ + using Gemm = cutlass::gemm::device::Gemm< \ + int8_t, \ + X, \ + int8_t, \ + Y, \ + ElementOutput, \ + cutlass::layout::RowMajor, \ + int32_t, \ + cutlass::arch::OpClassSimt, \ + cutlass::arch::Sm61, \ + ThreadBlockShape, \ + WarpShape, \ + InstructionShape, \ + cutlass::epilogue::thread::LinearCombinationClamp< \ + ElementOutput, \ + 1, \ + ElementAccumulator, \ + ElementCompute \ + >, \ + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, \ + 2 \ + >; \ + EXPECT_TRUE(test::gemm::device::TestAllGemm()); + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8n_s8t_simt_op_dp4a, 64x64x16_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + RUN_GEMM(N, T) +} + +TEST(SM61_Device_Gemm_s8n_s8t_simt_op_dp4a, 256x128x64_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + RUN_GEMM(N, T) +} + +TEST(SM61_Device_Gemm_s8n_s8t_simt_op_dp4a, 256x256x16_128x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 256, 16>; + using WarpShape = 
cutlass::gemm::GemmShape<128, 64, 16>; + RUN_GEMM(N, T) +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8t_s8n_simt_op_dp4a, 64x64x16_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + RUN_GEMM(T, N) +} + +TEST(SM61_Device_Gemm_s8t_s8n_simt_op_dp4a, 256x128x64_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + RUN_GEMM(T, N) +} + +TEST(SM61_Device_Gemm_s8t_s8n_simt_op_dp4a, 256x256x16_128x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 256, 16>; + using WarpShape = cutlass::gemm::GemmShape<128, 64, 16>; + RUN_GEMM(T, N) +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8n_s8n_simt_op_dp4a, 64x64x16_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + RUN_GEMM(N, N) +} + +TEST(SM61_Device_Gemm_s8n_s8n_simt_op_dp4a, 256x128x64_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + RUN_GEMM(N, N) +} + +TEST(SM61_Device_Gemm_s8n_s8n_simt_op_dp4a, 256x256x16_128x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 256, 16>; + using WarpShape = cutlass::gemm::GemmShape<128, 64, 16>; + RUN_GEMM(N, N) +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8t_s8t_simt_op_dp4a, 64x64x16_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + RUN_GEMM(T, T) +} + +TEST(SM61_Device_Gemm_s8t_s8t_simt_op_dp4a, 256x128x64_64x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + 
RUN_GEMM(T, T) +} + +TEST(SM61_Device_Gemm_s8t_s8t_simt_op_dp4a, 256x256x16_128x64x4) { + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 256, 16>; + using WarpShape = cutlass::gemm::GemmShape<128, 64, 16>; + RUN_GEMM(T, T) +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_int8_igemm_sm61_sliced_k.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_int8_igemm_sm61_sliced_k.cu new file mode 100644 index 0000000000000000000000000000000000000000..bd0a1f8699d402cbcce214d6669d3c2a8498a16d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_int8_igemm_sm61_sliced_k.cu @@ -0,0 +1,307 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8n_s8t_simt_op_dp4a_sliced_k, 32x32x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajor, + int8_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM61_Device_Gemm_s8n_s8t_simt_op_dp4a_sliced_k, 32x64x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajor, + int8_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM61_Device_Gemm_s8t_s8n_simt_op_dp4a_sliced_k, 32x32x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM61_Device_Gemm_s8t_s8n_simt_op_dp4a_sliced_k, 32x64x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM61_Device_Gemm_s8t_s8t_simt_op_dp4a_sliced_k, 32x32x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM61_Device_Gemm_s8t_s8t_simt_op_dp4a_sliced_k, 32x64x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM61_Device_Gemm_s8n_s8n_simt_op_dp4a_sliced_k, 32x32x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 32, 128>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM61_Device_Gemm_s8n_s8n_simt_op_dp4a_sliced_k, 32x64x128_32x32x4) { + + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementCompute = float; + + using Gemm = cutlass::gemm::device::Gemm< + int8_t, + cutlass::layout::ColumnMajor, + int8_t, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + int32_t, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm61, + cutlass::gemm::GemmShape<32, 64, 128>, + cutlass::gemm::GemmShape<32, 32, 64>, + cutlass::gemm::GemmShape<1, 1, 4>, + cutlass::epilogue::thread::LinearCombinationClamp< + ElementOutput, + 1, + ElementAccumulator, + ElementCompute + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_nn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_nn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..10889bb495ee16197f108887fab96b0ef40f5e82 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_nn_sm50.cu @@ -0,0 +1,861 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include <iostream> + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::Quaternion<float>; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic<Gemm>()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_nn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::Quaternion<float>; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using 
ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_nn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using 
ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::Quaternion; + 
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_nt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_nt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..f3d0a78e69472f2b4a2d64e62c264cb654c26ddb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_nt_sm50.cu @@ -0,0 +1,861 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 
NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_nt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_qgemm_nt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_nt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = 
cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_nt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_nt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..ed0f74d0fac776075b5691440b1811a753cf9321 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_tn_sm50.cu @@ -0,0 +1,861 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_qgemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + 
precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = 
cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_tt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_tt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..c8127c5e54a573c5f6b9c0028979c65e8735be93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_qgemm_tt_sm50.cu @@ -0,0 +1,861 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include <iostream> + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic<Gemm>()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_tt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_qgemm_tt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_qgemm_tt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::Quaternion; + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_qgemm_tt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::Quaternion; + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_qgemm_tt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = cutlass::Quaternion; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemmBasic()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..f48e9e1b6c652f286d75cde0b9302ea9c749943e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nn_sm50.cu @@ -0,0 +1,1740 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 
+// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = float; + using ThreadblockShape 
= cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements 
/ Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_nn, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = float; + using ThreadblockShape 
= cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / 
Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, 
+ precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + 
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = float; + using 
ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, 
precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 
+CUTLASS_TEST_L0(SM50_device_sgemm_nn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_affine2_nn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using 
precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nn, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, 
precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 
+CUTLASS_TEST_L2(SM50_device_sgemm_nn, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nn, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..69058bbd1e8dc75a94254d07cfe2e85e1f1dd63b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nt_sm50.cu @@ -0,0 +1,1800 @@ +/*************************************************************************************************** + * 
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// 
Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // 
Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, 
+ precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 
+// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_nt, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 
+// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 
4 +// Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_nt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 
2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_affine2_nt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2RowMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + 
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 
128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_nt, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, 
precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 
+CUTLASS_TEST_L2(SM50_device_sgemm_nt, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 128x32x16_32x8x1_4x2_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, 
+ precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_nt, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nt_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nt_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..fda68e53d5e79b2ae69570fe7040f7c35a099e04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_nt_sm80.cu @@ -0,0 +1,296 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 32x64x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 64x64x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 
128x128x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32an_f32at_f32at_simt_f32, 128x128x8_32x64x1) { + + using Element = float; + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2RowMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + LayoutA, + Element, + LayoutB, + Element, + LayoutC, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C )); + +} + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 64x128x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 8>, + 
cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 128x64x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 8>, + cutlass::gemm::GemmShape<64, 32, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 128x128x8_64x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32n_f32t_f32t_simt_f32, 128x256x8_64x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 8>, + 
cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..b67aa2310bbbf2dee0fb387c2805cba813cdd79d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tn_sm50.cu @@ -0,0 +1,1710 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 
8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp 
= cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = float; + using ThreadblockShape = 
cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 
8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_tn, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 
4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 
4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using 
EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_tn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_affine2_tn, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using 
ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2RowMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 128x64x8_32x32x1_8x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 
4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tn, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + 
>; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, 
+ precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 
4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tn, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tn_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tn_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..202c5a1f8ecfcb90bbfd7fa558d9806667ee87d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tn_sm80.cu @@ -0,0 +1,296 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed.h" + + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) +//////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 32x64x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 64x64x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + 
+TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 128x128x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32at_f32an_f32t_simt_f32, 128x128x8_32x64x1) { + + using Element = float; + using LayoutA = cutlass::layout::AffineRank2RowMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + LayoutA, + Element, + LayoutB, + Element, + LayoutC, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {1}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm( stride_factor_A, stride_factor_B, stride_factor_C )); +} + +TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 64x128x8_32x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<64, 128, 8>, + cutlass::gemm::GemmShape<32, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 128x64x8_64x32x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 8>, + cutlass::gemm::GemmShape<64, 32, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 128x128x8_64x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +TEST(SM80_Device_Gemm_f32t_f32n_f32t_simt_f32, 128x256x8_64x64x1) { + + using Element = float; + + using Gemm = cutlass::gemm::device::Gemm< + Element, + cutlass::layout::RowMajor, + Element, + cutlass::layout::ColumnMajor, + Element, + cutlass::layout::RowMajor, + Element, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 256, 8>, + cutlass::gemm::GemmShape<64, 64, 8>, + cutlass::gemm::GemmShape<1, 1, 1>, + cutlass::epilogue::thread::LinearCombination< + Element, + 1, + Element, + Element>, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..82b07736ecbe4950cb0556496d32d967dedb1264 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sgemm_tt_sm50.cu @@ -0,0 +1,1770 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using 
WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 16x64x8_16x64x1_4x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 
1 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 32x32x8_32x32x1_8x4_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 32x64x8_32x64x1_8x8_4x8_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 64x32x8_64x32x1_8x8_8x4_1x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 16x128x8_16x64x1_4x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 64 x 8 
+CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x64x8_32x32x1_8x4_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 32x128x8_32x64x1_8x8_4x8_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_tt, 64x64x8_64x32x1_8x8_8x4_1x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x32x8_32x32x1_8x4_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 64x64x8_32x64x1_8x8_4x8_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 1 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 128x32x8_64x32x1_8x8_8x4_2x1, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x128x8_16x64x1_4x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
+ cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x64x8_32x32x1_8x4_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 64x128x8_32x64x1_8x8_4x8_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 128x32x8_64x16x1_8x4_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 128x64x8_64x32x1_8x8_8x4_2x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// 
Threadblock: 16 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 16x128x16_8x32x1_2x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
+ cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 32x256x8_16x64x1_4x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x128x8_32x32x1_8x4_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 256 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 64x256x8_32x64x1_8x8_4x8_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_tt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 
+// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L0(SM50_device_sgemm_affine2_tt, 128x128x8_64x32x1_8x8_8x4_2x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using LayoutA = cutlass::layout::AffineRank2ColumnMajor; + using LayoutB = cutlass::layout::AffineRank2ColumnMajor; + using LayoutC = cutlass::layout::AffineRankN<2>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, LayoutA, + precision, LayoutB, + precision, LayoutC, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + + typename LayoutA::Stride::Index stride_factor_A[] = {3, 4}; + typename LayoutB::Stride::Index stride_factor_B[] = {5, 6}; + typename LayoutC::Stride::Index stride_factor_C[] = {7, 8}; + + EXPECT_TRUE(test::gemm::device::TestAllGemm(stride_factor_A, stride_factor_B, stride_factor_C)); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = 
cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 128x64x8_32x32x1_8x4_4x8_4x2, { 
+ using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 128x128x8_32x64x1_8x8_4x8_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 256x32x8_64x16x1_8x4_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 8 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_sgemm_tt, 256x64x8_64x32x1_8x8_8x4_4x2, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 128 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 32x128x16_8x32x1_2x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; 
+ + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const 
kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x128x8_16x32x1_4x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 8 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 256 x 8 
+CUTLASS_TEST_L2(SM50_device_sgemm_tt, 64x256x8_16x64x1_4x8_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 128x64x8_32x16x1_4x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 128 x 128 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 128x128x8_32x32x1_8x4_4x8_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 8 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 256 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_sgemm_tt, 256x64x8_64x16x1_8x4_8x4_4x4, { + using precision = float; + using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + 
precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sm50.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sm50.py new file mode 100644 index 0000000000000000000000000000000000000000..0c732d6e5e2b57552e140bf0603be2e4c48ba8cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_sm50.py @@ -0,0 +1,341 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# this file creates the test/unit/gemm/device simt tests + + +outputDir = "" + +################################################################################ +# parameters +# Edge - for tiles, the edges represent the length of one side +# Ratio - the maximum ratio between 2 edges, limits the skinnyness of tiles +# MaxEdge - maximum length of each edge +# Min/Max - minimum/maximum of the product of edge lengths +################################################################################ + +warpsPerThreadblockEdge = [1, 2, 4, 8, 16] +warpsPerThreadblockRatio = 2 +warpsPerThreadblockMax = 16 +# NOTE 1x32 and 2x16 warp tile shapes fail validation for ~10% of cases + +warpShapeEdges = [8, 16, 32, 64, 128, 256] +warpShapeRatio = 4 +warpShapeMax = 64*64 +warpShapeMin = 8*8 + +threadblockEdgeMax = 256 + +# char, type bits/elem, max tile, L0 threadblock tiles +precisions = [ + ["c", "cutlass::complex", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ], + ["q", "cutlass::Quaternion", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ], + ["d", "double", 64, 64*64, [ [ 64, 64], [ 32, 32] ] ], + ["h", "cutlass::half_t", 16, 128*256, [ [256, 128], [ 64, 128], [ 64, 32] ] ], + ["i", "int", 32, 128*128, [ [128, 64], [ 16, 32] ] ], + ["s", "float", 32, 128*128, [ [128, 256], [128, 128], [ 64, 64] ] ], + ["z", "cutlass::complex", 128, 64*64, [ [ 32, 64], [ 16, 32] ] ], + ] +# L1 will have a single kernel for every unique shape +# L2 will have everything else + 
+transposes = [ + [False, False], + [False, True], + [True, False], + [True, True] + ] + +################################################################################ +# warps per threadblock +################################################################################ +warpsPerThreadblocks = [] +for warpsPerThreadblock0 in warpsPerThreadblockEdge: + for warpsPerThreadblock1 in warpsPerThreadblockEdge: + if warpsPerThreadblock0 / warpsPerThreadblock1 <= warpsPerThreadblockRatio and warpsPerThreadblock1 / warpsPerThreadblock0 <= warpsPerThreadblockRatio and warpsPerThreadblock0 * warpsPerThreadblock1 <= warpsPerThreadblockMax: + warpsPerThreadblocks.append([warpsPerThreadblock0, + warpsPerThreadblock1]) +print("WarpsPerThreadblocks",warpsPerThreadblocks) + +################################################################################ +# warp shapes +################################################################################ +warpNumThreads = 32 +warpShapes = [] +for warp0 in warpShapeEdges: + for warp1 in warpShapeEdges: + if warp0 / warp1 <= warpShapeRatio and warp1 / warp0 <= warpShapeRatio and warp0*warp1 <= warpShapeMax and warp0*warp1 > warpShapeMin: + warpShapes.append([warp0, warp1]) +print("WarpShapes", warpShapes) + +numL0 = 0 +numL1 = 0 +numL2 = 0 + +################################################################################ +# create kernels +# create a file for each precision/transpose +# each file contains many tile sizes +################################################################################ + +# precisions +for precision in precisions: + + # get precision char + precisionChar = precision[0] + precisionType = precision[1] + precisionBits = precision[2] + threadblockMaxElements = precision[3] + threadblockTilesL0 = precision[4] + + # transposes + for transpose in transposes: + + # get transpose char + columnMajorA = transpose[0] + columnMajorB = transpose[1] + transCharA = "n" if columnMajorA else "t" + transCharB = "n" if 
columnMajorB else "t" + + # open file + fileName="simt_%sgemm_%s%s_sm50.cu" % (precisionChar, transCharA, transCharB) + print("\n", fileName) + filePath = "%s%s" % (outputDir, fileName) + out = open(filePath, "w+") + + # write file header + out.write("/***************************************************************************************************\n" +" * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. \n" +" * SPDX-License-Identifier: BSD-3-Clause \n" +" * \n" +" * Redistribution and use in source and binary forms, with or without \n" +" * modification, are permitted provided that the following conditions are met: \n" +" * \n" +" * 1. Redistributions of source code must retain the above copyright notice, this \n" +" * list of conditions and the following disclaimer. \n" +" * \n" +" * 2. Redistributions in binary form must reproduce the above copyright notice, \n" +" * this list of conditions and the following disclaimer in the documentation \n" +" * and/or other materials provided with the distribution. \n" +" * \n" +" * 3. Neither the name of the copyright holder nor the names of its \n" +" * contributors may be used to endorse or promote products derived from \n" +" * this software without specific prior written permission. \n" +" * \n" +" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n" +" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n" +" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n" +" * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n" +" * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n" +" * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \n" +" * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \n" +" * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \n" +" * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n" +" * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n" +" *\n" +" **************************************************************************************************/\n" +"/*! \\file\n" +" \\brief Tests for device-wide GEMM interface\n" +"*/\n" +"\n" +"#include \n" +"\n" +"#include \"cutlass/cutlass.h\"\n" +"#include \"cutlass/gemm/device/gemm.h\"\n" +"#include \"cutlass/numeric_types.h\"\n" +"\n" +"#include \"../../common/cutlass_unit_test.h\"\n" +"\n" +"#include \"cutlass/util/host_tensor.h\"\n" +"#include \"cutlass/util/tensor_view_io.h\"\n" +"#include \"cutlass/util/reference/host/tensor_fill.h\"\n" +"#include \"cutlass/util/reference/host/tensor_copy.h\"\n" +"#include \"cutlass/util/reference/host/tensor_compare.h\"\n" +"#include \"cutlass/util/reference/host/gemm.h\"\n" +"\n" +"#include \"testbed.h\"\n" +"\n") + foundThreadblockTilesL0 = {} + foundThreadblockTilesL1 = {} + + ######################################################################## + # for each combination of tile sizes + ######################################################################## + for warpsPerThreadblock in warpsPerThreadblocks: + for warpShape in warpShapes: + warpThreadsM = 0 + if warpShape[0] > warpShape[1]: + warpThreadsM = 8 + else: + warpThreadsM = 4 + warpThreadsN = warpNumThreads / warpThreadsM + + # skip shapes with conflicting rectangularity + # they are unlikely to be fastest + blockG = warpsPerThreadblock[0] > warpsPerThreadblock[1] + blockL = 
warpsPerThreadblock[0] < warpsPerThreadblock[1] + warpG = warpShape[0] > warpShape[1] + warpL = warpShape[0] < warpShape[1] + + blockG2 = warpsPerThreadblock[0] > warpsPerThreadblock[1]*2 + blockL2 = warpsPerThreadblock[0]*2 < warpsPerThreadblock[1] + warpG2 = warpShape[0] > warpShape[1]*2 + warpL2 = warpShape[0]*2 < warpShape[1] + + if blockG2 and warpL: continue + if blockL2 and warpG: continue + if warpG2 and blockL: continue + if warpL2 and blockG: continue + + # check threadblock ratios and max + threadblockTile = [warpShape[0]*warpsPerThreadblock[0], + warpShape[1]*warpsPerThreadblock[1]] + if threadblockTile[0] * threadblockTile[1] > threadblockMaxElements: continue + if threadblockTile[0] > threadblockEdgeMax: continue + if threadblockTile[1] > threadblockEdgeMax: continue + totalThreads = warpNumThreads*warpsPerThreadblock[0]*warpsPerThreadblock[1] + + # calculate unroll + # ensure that every iteration at least a full load of A,B are done + unrollMin = 8 + unrollMin0 = totalThreads / threadblockTile[0] + unrollMin1 = totalThreads / threadblockTile[1] + unroll = max(unrollMin, unrollMin0, unrollMin1) + + threadTileM = warpShape[0] / warpThreadsM + threadTileN = warpShape[1] / warpThreadsN + if threadTileM < 2 or threadTileN < 2: continue + if threadTileM*threadTileN*precisionBits > 8*8*32: continue + + # epilogue currently only supports N < WarpNumThreads + if threadblockTile[1] < warpNumThreads: continue + + # limit smem + smemBitsA = threadblockTile[0]*unroll*2*precisionBits + smemBitsB = threadblockTile[1]*unroll*2*precisionBits + smemKBytes = (smemBitsA+smemBitsB)/8/1024 + if (smemKBytes > 48): continue + + # test level 0 + testLevel = -1 + for tileId in range(0, len(threadblockTilesL0)): + tbTile = threadblockTilesL0[tileId] + if tbTile[0] == threadblockTile[0] and tbTile[1] == threadblockTile[1]: + if tuple(tbTile) not in foundThreadblockTilesL0: + testLevel = 0 + numL0 += 1 + foundThreadblockTilesL0[tuple(tbTile)] = True + + # test level 1 + if 
testLevel < 0: + threadblockTileAlreadyUsed = False + if tuple(threadblockTile) not in foundThreadblockTilesL1: + testLevel = 1 + numL1 += 1 + foundThreadblockTilesL1[tuple(threadblockTile)] = True + + # test level 2 + if testLevel < 0: + testLevel = 2 + numL2 += 1 + + ################################################################ + # write this tile to file + ################################################################ + + print("%ix%ix%i__%ix%i_%ix%i_%ix%i L%i" % ( + threadblockTile[0], threadblockTile[1], unroll, + threadTileM, threadTileN, + warpThreadsM, warpThreadsN, + warpsPerThreadblock[0], warpsPerThreadblock[1], testLevel)) + + out.write("////////////////////////////////////////////////////////////////////////////////\n" + "// Elements / Thread: %3i x %3i\n" + "// Threads / Warp: %3i x %3i\n" + "// Warps / Block: %3i x %3i\n" + "// Threadblock: %3i x %3i x %2i\n" + % ( threadTileM, threadTileN, + warpThreadsM, warpThreadsN, + warpsPerThreadblock[0], warpsPerThreadblock[1], + threadblockTile[0], threadblockTile[1], unroll + ) + ) + + out.write("CUTLASS_TEST_L%i(SM50_device_%sgemm_%s%s, %ix%ix%i_%ix%ix1_%ix%i_%ix%i_%ix%i, {\n" % ( + testLevel, + precisionChar, + transCharA, + transCharB, + threadblockTile[0], + threadblockTile[1], + unroll, + warpShape[0], + warpShape[1], + threadTileM, + threadTileN, + warpThreadsM, + warpThreadsN, + warpsPerThreadblock[0], + warpsPerThreadblock[1] + )) + out.write(" using precision = %s;\n" % precisionType) + out.write(" using ThreadblockShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n" % ( + threadblockTile[0], + threadblockTile[1], + unroll)) + out.write(" using WarpShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n\n" % ( + warpShape[0], + warpShape[1], + unroll)) + out.write(" static int const kEpilogueElementsPerAccess = 1;\n" + " using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;\n" + " using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<\n" + " precision, kEpilogueElementsPerAccess, 
precision, precision>;\n\n") + + out.write(" using Gemm = cutlass::gemm::device::Gemm<\n" + " precision, cutlass::layout::%sMajor,\n" + " precision, cutlass::layout::%sMajor,\n" + " precision, cutlass::layout::RowMajor,\n" + " precision,\n" + " cutlass::arch::OpClassSimt,\n" + " cutlass::arch::Sm50,\n" + " ThreadblockShape, WarpShape, InstructionShape,\n" + " EpilogueOutputOp,\n" + " cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,\n" + " 2 // Stages\n" + " >;\n" % ( + "Column" if columnMajorA else "Row", + "Column" if columnMajorB else "Row", + )) + out.write(" EXPECT_TRUE(test::gemm::device::TestAllGemm());\n" + "} )\n\n") + + + out.close() +print("NumKernels:", numL0, numL1, numL2) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_nn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_nn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..fb268af001b88b086525bb84e65069a8ca65fc33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_nn_sm50.cu @@ -0,0 +1,801 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_nn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// 
Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + 
EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + 
precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 
32x32x8_16x16x1_4x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_nn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + 
precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + 
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_nt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_nt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..0b1312a37c05587359761b2e5821398241393c8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_nt_sm50.cu @@ -0,0 +1,801 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_nt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / 
Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using 
precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_nt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / 
Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_nt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_nt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_tn_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_tn_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..28dbb9ba7ed667b361f0059e1c1adc116ff8d756 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_tn_sm50.cu @@ -0,0 +1,801 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_tn, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / 
Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 32x32x8_16x16x1_4x2_4x8_2x2, { + using 
precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_tn, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + 
cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, 
kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / 
Block: 4 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tn, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tn, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::ColumnMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_tt_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_tt_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..079e756ea2677846b874bd175a6dfbdb57831541 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/simt_zgemm_tt_sm50.cu @@ -0,0 +1,801 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/numeric_types.h" + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 8x32x8_8x32x1_2x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 1 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_tt, 16x32x8_16x32x1_4x4_4x8_1x1, { + using precision = cutlass::complex; + using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 8x32x8_8x16x1_2x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 
2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 8 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 8x64x8_8x32x1_2x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 16x32x8_16x16x1_4x2_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 1 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 16x64x8_16x32x1_4x4_4x8_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 1 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 32x32x8_32x16x1_4x4_8x4_1x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, 
cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 1 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 32x32x8_16x32x1_4x4_4x8_2x1, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 16x32x8_8x16x1_2x2_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 16 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 16x64x8_8x32x1_2x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 32x32x8_16x16x1_4x2_4x8_2x2, { + using 
precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 2 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L0(SM50_device_zgemm_tt, 32x64x8_16x32x1_4x4_4x8_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + 
+//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 2 +// Threadblock: 64 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 64x32x8_32x16x1_4x4_8x4_2x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 16 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 16x64x16_8x16x1_2x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + 
cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 32x32x8_16x8x1_2x2_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 32x64x8_16x16x1_4x2_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, 
precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 2 x 4 +// Threadblock: 32 x 128 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 32x128x8_16x32x1_4x4_4x8_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 2 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 64x64x8_32x16x1_4x4_8x4_2x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; 
+ + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 32 x 32 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 32x32x8_8x16x1_2x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 32 x 8 
+CUTLASS_TEST_L2(SM50_device_zgemm_tt, 64x32x8_16x16x1_4x2_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 2 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 64x64x8_16x32x1_4x4_4x8_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + 
EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 4 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 2 +// Threadblock: 128 x 32 x 8 +CUTLASS_TEST_L1(SM50_device_zgemm_tt, 128x32x8_32x16x1_4x4_8x4_4x2, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 32 x 64 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 32x64x16_8x16x1_2x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, 
cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 2 x 2 +// Threads / Warp: 8 x 4 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 32 x 16 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 64x32x16_16x8x1_2x2_8x4_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + +//////////////////////////////////////////////////////////////////////////////// +// Elements / Thread: 4 x 2 +// Threads / Warp: 4 x 8 +// Warps / Block: 4 x 4 +// Threadblock: 64 x 64 x 8 +CUTLASS_TEST_L2(SM50_device_zgemm_tt, 64x64x8_16x16x1_4x2_4x8_4x4, { + using precision = cutlass::complex; + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; + + static int const kEpilogueElementsPerAccess = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using EpilogueOutputOp = 
cutlass::epilogue::thread::LinearCombination< + precision, kEpilogueElementsPerAccess, precision, precision>; + + using Gemm = cutlass::gemm::device::Gemm< + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, cutlass::layout::RowMajor, + precision, + cutlass::arch::OpClassSimt, + cutlass::arch::Sm50, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 2 // Stages + >; + EXPECT_TRUE(test::gemm::device::TestAllGemm()); +} ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm50_gemm_f32_f32_f32_simt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm50_gemm_f32_f32_f32_simt.cu new file mode 100644 index 0000000000000000000000000000000000000000..f7a18bc667b5a59ca73fbdf373ec4f59d3f99959 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm50_gemm_f32_f32_f32_simt.cu @@ -0,0 +1,135 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f32n_f32n_f32n_simt_f32, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f32n_f32t_f32n_simt_f32, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f32t_f32n_f32n_simt_f32, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + float, cutlass::layout::RowMajor, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f32t_f32t_f32n_simt_f32, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm50_gemm_f64_f64_f64_simt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm50_gemm_f64_f64_f64_simt.cu new file mode 100644 index 0000000000000000000000000000000000000000..421072fee9b604ecc9e103d846e30ff539c895dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm50_gemm_f64_f64_f64_simt.cu @@ -0,0 +1,134 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f64n_f64n_f64n_simt_f64, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM50_Device_Gemm_f64n_f64t_f64n_simt_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + 
cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f64t_f64n_f64n_simt_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Device_Gemm_f64t_f64t_f64n_simt_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + double, cutlass::layout::RowMajor, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm61_gemm_s8_s8_s32_simt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm61_gemm_s8_s8_s32_simt.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba6456b57a5cba04f030629749dea304d88e5c94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm61_gemm_s8_s8_s32_simt.cu @@ -0,0 +1,136 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +//#if defined(CUTLASS_ARCH_MMA_SM61_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8n_s8n_s32n_simt_s32, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + int8_t, cutlass::layout::ColumnMajor, + int8_t, cutlass::layout::ColumnMajor, + int32_t, cutlass::layout::ColumnMajor, + int32_t>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8n_s8t_s32n_simt_s32, 128x128x64_64x64x64) { + using Config = 
cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + int8_t, cutlass::layout::ColumnMajor, + int8_t, cutlass::layout::RowMajor, + int32_t, cutlass::layout::ColumnMajor, + int32_t>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8t_s8n_s32n_simt_s32, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + int8_t, cutlass::layout::RowMajor, + int8_t, cutlass::layout::ColumnMajor, + int32_t, cutlass::layout::ColumnMajor, + int32_t>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_Device_Gemm_s8t_s8t_s32n_simt_s32, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm50, + int8_t, cutlass::layout::RowMajor, + int8_t, cutlass::layout::RowMajor, + int32_t, cutlass::layout::ColumnMajor, + int32_t>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +//#endif // #if 
defined(CUTLASS_ARCH_MMA_SM61_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f16_f16_f32_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f16_f16_f32_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..40f7cdb29acd6d5fd6ef0ca4095d2e840a5541c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f16_f16_f32_tensor_op_f32.cu @@ -0,0 +1,136 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +//#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +#if 1 +TEST(SM80_Device_Gemm_f16t_f16n_f32t_tensor_op_f32_3x, 128x128x32_64x64x32) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::half_t, cutlass::layout::RowMajor, + cutlass::half_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} +#endif +///////////////////////////////////////////////////////////////////////////////////////////////// +#if 1 +TEST(SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32_3x, 
128x128x32_64x64x32) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::half_t, cutlass::layout::ColumnMajor, + cutlass::half_t, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32_3x, 128x128x32_64x64x32) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::half_t, cutlass::layout::ColumnMajor, + cutlass::half_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f16t_f16t_f32t_tensor_op_f32_3x, 128x128x32_64x64x32) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::half_t, cutlass::layout::RowMajor, + cutlass::half_t, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} +#endif 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +//#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f32_f32_f32_simt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f32_f32_f32_simt.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7c6b522c55589787505b4e4a09192511d960f2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f32_f32_f32_simt.cu @@ -0,0 +1,135 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32n_f32n_f32n_simt_f32, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32n_f32t_f32n_simt_f32, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + 
cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32t_f32n_f32n_simt_f32, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + float, cutlass::layout::RowMajor, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f32t_f32t_f32n_simt_f32, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, cutlass::layout::ColumnMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f64_f64_f64_simt.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f64_f64_f64_simt.cu new file mode 100644 index 0000000000000000000000000000000000000000..274b30cf82b3df99e0304f938d67d0fc9d2f9d3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f64_f64_f64_simt.cu @@ -0,0 +1,134 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64n_f64n_simt_f64, 128x128x64_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Gemm_f64n_f64t_f64n_simt_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + 
cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64n_simt_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64t_f64n_simt_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassSimt, cutlass::arch::Sm80, + double, cutlass::layout::RowMajor, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f64_f64_f64_tensor_op_f64.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f64_f64_f64_tensor_op_f64.cu new file mode 100644 index 0000000000000000000000000000000000000000..e53a8e8122623ed56fdad66f536f62b703bcd45e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_f64_f64_f64_tensor_op_f64.cu @@ -0,0 +1,98 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + +//#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64n_f64t_f64n_tensor_op_f64, 128x128x64_64x64x64) { + + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_f64t_f64n_f64n_tensor_op_f64, 128x128x64_64x64x64) { + + using Config = 
cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + double, cutlass::layout::RowMajor, + double, cutlass::layout::ColumnMajor, + double, cutlass::layout::ColumnMajor, + double>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// #endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_tf32_tf32_f32_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_tf32_tf32_f32_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..14654c781fc6aa7684419f3b31cd5c160f227272 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm80_gemm_tf32_tf32_f32_tensor_op_f32.cu @@ -0,0 +1,135 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "default_gemm_configuration.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +using namespace cute; + + +//#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32n_tf32n_f32n_tensor_op_f32, 128x128x32_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::tfloat32_t, cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Gemm_tf32n_tf32t_f32n_tensor_op_f32, 128x128x32_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::tfloat32_t, cutlass::layout::ColumnMajor, + cutlass::tfloat32_t, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Gemm_tf32t_tf32n_f32n_tensor_op_f32, 128x128x32_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::tfloat32_t, cutlass::layout::RowMajor, + cutlass::tfloat32_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM80_Device_Gemm_tf32t_tf32t_f32n_tensor_op_f32, 128x128x32_64x64x64) { + using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types< + cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, + cutlass::tfloat32_t, cutlass::layout::RowMajor, + cutlass::tfloat32_t, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + Config::CollectiveMainloop, + Config::CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +//#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_evt_operations.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_evt_operations.hpp new file mode 100644 index 0000000000000000000000000000000000000000..71425fee2cb0f47221458df4eaccdcee55ad260d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_evt_operations.hpp @@ -0,0 +1,457 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Host reference and operations for Sm90 EVT unit test +*/ +#pragma once +#include "gemm_testbed_3x_evt.hpp" + +////////////////////////////////////////////////////////////////////////////// +/// Host references used for testing +namespace test::gemm::device { +template +using HEVT = HostTreeVisitor; + +template +using HDAG = HostTopoVisitor; + +template +using HST = HostSplitTreeVisitor; + +/// D = alpha * acc + beta * C + AuxLoad +template +class HostEVTAuxLoad { +public: + using ScalarAlpha = HostScalarBroadcast; + using AccFetchNode = HostAccumulator; + using AuxLoadNode = HostAuxLoad; + using TernaryCompute0 = HEVT, ScalarAlpha, AccFetchNode, AuxLoadNode>; + using ScalarBeta = HostScalarBroadcast; + using CLoadNode = HostAuxLoad; + using TernaryCompute1 = HEVT, ScalarBeta, CLoadNode, TernaryCompute0>; + using EVTModule = HEVT, TernaryCompute1>; +}; + +/// D = alpha * acc + beta * C + per-column bias +template +class HostPerColBias { +public: + using ScalarAlpha = HostScalarBroadcast; + using AccFetchNode = HostAccumulator; + using RowBroadcastNode = HostRowBroadcast; + using TernaryCompute0 = HEVT, ScalarAlpha, AccFetchNode, RowBroadcastNode>; + using ScalarBeta = HostScalarBroadcast; + using CLoadNode = HostAuxLoad; + using TernaryCompute1 = HEVT, ScalarBeta, CLoadNode, TernaryCompute0>; + using EVTModule = HEVT, TernaryCompute1>; +}; + +/// D = beta * C + Graph(relu(alpha * acc + aux) + aux) +/// Testing EVT - DAG structure +template +class HostEVTDAG { +public: + using ScalarAlpha = HostScalarBroadcast; + using AccFetchNode = HostAccumulator; + using AuxLoadNode = HostAuxLoad; + using DAGNode = HDAG< + Gemm, + cute::tuple< + cute::tuple<>, // 0. alpha + cute::tuple<>, // 1. acc + cute::tuple<>, // 2. aux load + cute::tuple, // 3. 
alpha * acc + aux load + cute::tuple, // relu(alpha * acc + aux load) + cute::tuple // relu(alpha * acc + aux load) + aux load + >, + ScalarAlpha, + AccFetchNode, + AuxLoadNode, + HostCompute, + HostCompute, + HostCompute + >; + using ScalarBeta = HostScalarBroadcast; + using CLoadNode = HostAuxLoad; + using TernaryCompute1 = HEVT, ScalarBeta, CLoadNode, DAGNode>; + using EVTModule = HEVT, TernaryCompute1>; +}; + +/// EVT = alpha * acc + C +/// D = Graph(maximum(EVT + per-row bias, EVT)) +/// Testing DAG - EVT +template +class HostDAGEVT { +public: + using EVTNode = HEVT< + HostAuxStore, + HEVT< + HostCompute, + HostScalarBroadcast, + HostAccumulator, + HostAuxLoad + > + >; + using EVTModule = HEVT< + HostAuxStore, + HDAG< + Gemm, + cute::tuple< + cute::tuple<>, // 0. EVT + cute::tuple<>, // 1. per-row bias + cute::tuple, // 2. EVT + per-row bias + cute::tuple // 3. maximum(EVT + per-row bias, EVT) + >, + EVTNode, + HostColBroadcast, + HostCompute, + HostCompute + > + >; +}; + +/// Xreduce(alpha * acc + beta * C) +template class, class> class ReduceOp> +class HostReduce { +public: + using ScalarAlpha = HostScalarBroadcast; + using AccFetchNode = HostAccumulator; + using BinaryCompute0 = HEVT, ScalarAlpha, AccFetchNode>; + using ScalarBeta = HostScalarBroadcast; + using CLoadNode = HostAuxLoad; + using TernaryCompute1 = HEVT, ScalarBeta, CLoadNode, BinaryCompute0>; + using ReduceNode = HEVT, TernaryCompute1>; + using EVTModule = HEVT, ReduceNode>; +}; + +// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias +// if D is fp8 +// D = scale_d * activation(Z) +// else +// D = activation(Z) +template class ActivationFn, class ElementD> +class HostScaledLinCombPerRowBiasEltAct { +public: + using EVTModule = HEVT< + HostAuxStore, + HEVT< + HostCompute::Op>, // activation(Z) * scaled_d + HEVT< + HostCompute, // activation(Z) + HEVT< + HostCompute, + HostScalarBroadcast, // scale_c * beta + HostAuxLoad, // C + HEVT< + HostCompute, + HostScalarBroadcast, 
// scale_a * scale_b * alpha + HostAccumulator, + HostColBroadcast, + > + > + >, + HostScalarBroadcast, // scale_d + > + >; +}; + +// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias +// if D is fp8 +// amax_d = max(abs(elements in activation(Z))) +// D = scale_d * activation(Z) +// else +// D = activation(Z) +// if Aux is fp8 +// amax_aux = max(abs(elements in Z)) +// Aux = scale_aux * Z +// else +// Aux = Z +template class ActivationFn, class ElementD> +class HostScaledLinCombPerRowBiasEltActAmaxAux { +public: + template + using amax = cutlass::maximum_absolute_value_reduction; + using EVTModule = HEVT< + HostAuxStore, + HST, + HostScalarBroadcast, // scale_c * beta + HostAuxLoad, // C + HEVT< + HostCompute, + HostScalarBroadcast, // scale_a * scale_b * alpha + HostAccumulator, + HostColBroadcast, + > + >, + // D = activation(Z) * scaled_d, amax_d = max(abs(elements in D)) + HEVT< + HostCompute::Op>, + HEVT< + HostScalarReduce, + HEVT< + HostCompute, //activation(Z) * scaled_d + HostAccumulator, // Z + > + >, + HostScalarBroadcast, // scale_d + >, + // Aux = Z * scale_aux, amax_aux = max(abs(elements in Aux)) + HEVT< + HostAuxStore, + HEVT< + HostCompute::Op>, + HEVT< + HostScalarReduce, + HostAccumulator + >, + HostScalarBroadcast + > + > + > + >; +}; +} // namespace test::gemm::device + +////////////////////////////////////////////////////////////////////////////// +namespace cutlass::epilogue { +namespace fusion { + +////////////////////////////////////////////////////////////////////////////// +/// D = alpha * acc + beta * C + AuxLoad +template< + class EpilogueDescriptor, + class AuxLoadDescriptor, + class ElementOutput, + class ElementCompute, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombAuxLoad = + Sm90EVT, // beta * C + (alpha * acc + bias) + Sm90ScalarBroadcast, // beta + Sm90SrcFetch, // C + Sm90EVT, // alpha * acc + bias + Sm90ScalarBroadcast, // 
alpha + Sm90AccFetch, // acc + Sm90AuxLoad< + AuxLoadDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile, + typename AuxLoadDescriptor::Element, + typename AuxLoadDescriptor::Stride, typename AuxLoadDescriptor::SmemLayoutAtom, + typename AuxLoadDescriptor::CopyOpS2R // aux load + > + > + >; + + +////////////////////////////////////////////////////////////////////////////// +/// Example DAG +/// beta * C + Graph(alpha * acc + gamma + acc) +template< + typename EpilogueDescriptor, + typename AuxLoadDescriptor, + class ElementOutput, + class ElementCompute, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombEVTDAG = + Sm90EVT, // beta * C + (alpha * acc + aux) + Sm90ScalarBroadcast, // beta + Sm90SrcFetch, // C + Sm90TopologicalVisitor< + ElementCompute, + cute::tuple< + cute::seq<>, // 0. alpha + cute::seq<>, // 1. acc + cute::seq<>, // 2. aux load + cute::seq<1, 0, 2>, // 3. alpha * acc + aux load + cute::seq<3>, // relu(alpha & acc + aux load) + cute::seq<2, 4> // relu(alpha * acc + aux load) + aux load + >, + Sm90ScalarBroadcast, // alpha + Sm90AccFetch, // acc + Sm90AuxLoad< + AuxLoadDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile, + typename AuxLoadDescriptor::Element, typename AuxLoadDescriptor::Stride, + typename AuxLoadDescriptor::SmemLayoutAtom, typename AuxLoadDescriptor::CopyOpS2R>, + Sm90Compute, + Sm90Compute, + Sm90Compute + > + >; + + +////////////////////////////////////////////////////////////////////////////// +/// Example DAG +/// EVT = alpha * acc + C +/// D = Graph(maximum(EVT + per-row bias, EVT)) +template< + class EpilogueDescriptor, + class AuxStoreDescriptor, + class ElementOutput, + class ElementCompute, + class ElementBias = ElementOutput, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombDAGEVT = + Sm90TopologicalVisitor< + ElementCompute, + cute::tuple< + 
cute::seq<>, + cute::seq<>, + cute::seq<1, 0>, + cute::seq<0, 2> + >, + Sm90EVT< + Sm90AuxStore< + AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile, + typename AuxStoreDescriptor::Element, RoundStyle, typename AuxStoreDescriptor::Stride, + typename AuxStoreDescriptor::SmemLayoutAtom, typename AuxStoreDescriptor::CopyOpR2S>, + Sm90EVT, + Sm90ScalarBroadcast, + Sm90AccFetch, + Sm90SrcFetch + > + >, + Sm90ColBroadcast<0, typename EpilogueDescriptor::TileShape, ElementBias>, + Sm90Compute, + Sm90Compute + >; + + +////////////////////////////////////////////////////////////////////////////// +/// D = alpha * acc + beta * C + per-column bias +template< + class EpilogueDescriptor, + class ElementOutput, + class ElementCompute, + class ElementBias = ElementOutput, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombPerColumnBias = + Sm90EVT, // beta * C + (alpha * acc + bias) + Sm90ScalarBroadcast, // beta + Sm90SrcFetch, // C + Sm90EVT, // alpha * acc + bias + Sm90ScalarBroadcast, // alpha + Sm90AccFetch, // acc + Sm90RowBroadcast< + ceil_div( + EpilogueDescriptor::StagesC, + size(shape_div(take<0, 2>(typename EpilogueDescriptor::TileShape{}), typename EpilogueDescriptor::EpilogueTile{})) + ) + 1, + typename EpilogueDescriptor::TileShape, + ElementBias + > + > + >; + + +////////////////////////////////////////////////////////////////////////////// +/// D = per-column reduce(alpha * acc + beta * C) +template< + template class RegReduceFn, + template class GmemReduceFn, + class ElementReduce, + class CtaTileShapeMNK, + class ElementOutput, + class ElementCompute, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombPerColumnReduce = + Sm90EVT, // per column reduce + Sm90EVT, // beta * C + alpha * acc + Sm90ScalarBroadcast, // beta + Sm90SrcFetch, // C + Sm90EVT, // alpha * acc + Sm90ScalarBroadcast, // alpha 
+ Sm90AccFetch // acc + > + > + >; + + +////////////////////////////////////////////////////////////////////////////// +/// D = per-row reduce(alpha * acc + beta * C) +template< + template class RegReduceFn, + template class GmemReduceFn, + class ElementReduce, + class CtaTileShapeMNK, + class ElementOutput, + class ElementCompute, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombPerRowReduce = + Sm90EVT, // per column reduce + Sm90EVT, // beta * C + alpha * acc + Sm90ScalarBroadcast, // beta + Sm90SrcFetch, // C + Sm90EVT, // alpha * acc + Sm90ScalarBroadcast, // alpha + Sm90AccFetch // acc + > + > + >; + + +////////////////////////////////////////////////////////////////////////////// +/// D = scalar reduce(alpha * acc + beta * C) +template< + template class RegReduceFn, + template class GmemReduceFn, + class ElementReduce, + class ElementOutput, + class ElementCompute, + class ElementScalar = ElementCompute, + FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest +> +using Sm90LinCombScalarReduce = + Sm90EVT, // per column reduce + Sm90EVT, // beta * C + alpha * acc + Sm90ScalarBroadcast, // beta + Sm90SrcFetch, // C + Sm90EVT, // alpha * acc + Sm90ScalarBroadcast, // alpha + Sm90AccFetch // acc + > + > + >; +} // namespace fusion + +} // namespace cutlass::epilogue diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_alignx_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_alignx_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..b0279f0ffff4d0972dda4a07673291281e8483ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_alignx_tensor_op_f32.cu @@ -0,0 +1,209 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16t_bf16t_bf16n_align8_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 8, + cutlass::bfloat16_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16t_bf16n_bf16n_align4_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 4, + cutlass::bfloat16_t, LayoutB, 4, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 4, + cutlass::bfloat16_t, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16n_bf16t_bf16n_align2_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 2, + cutlass::bfloat16_t, LayoutB, 2, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + 
>::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 2, + cutlass::bfloat16_t, LayoutC, 2, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16n_bf16n_bf16n_align8_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 8, + cutlass::bfloat16_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..a151126becfa32f860d7f58a3293935d972ea7ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu @@ -0,0 +1,208 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16t_bf16t_bf16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 8, + cutlass::bfloat16_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto 
+ >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16t_bf16n_bf16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 8, + cutlass::bfloat16_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16n_bf16t_bf16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 8, + cutlass::bfloat16_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_bf16n_bf16n_bf16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::bfloat16_t, LayoutA, 8, + cutlass::bfloat16_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_alignx_tensor_op.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_alignx_tensor_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..1e0c395b8bd542e6e1d0695f0f1339d391277c7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_alignx_tensor_op.cu @@ -0,0 +1,510 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////// TT ////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_align8_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelMultistage + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + 
CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_align4_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 4, + cutlass::half_t, LayoutB, 4, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 4, + cutlass::half_t, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_align2_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 2, + cutlass::half_t, LayoutB, 2, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 2, + cutlass::half_t, LayoutC, 2, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////// TN ////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_align8_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelMultistage + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_align4_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 4, + cutlass::half_t, LayoutB, 4, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 4, + cutlass::half_t, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_align2_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 2, + cutlass::half_t, LayoutB, 2, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 2, + cutlass::half_t, LayoutC, 2, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////// NT ////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_align8_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelMultistage + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_align4_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 4, + cutlass::half_t, LayoutB, 4, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 4, + cutlass::half_t, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_align2_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 2, + cutlass::half_t, LayoutB, 2, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 2, + cutlass::half_t, LayoutC, 2, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////// NN ////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_align8_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelMultistage + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_align4_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 4, + cutlass::half_t, LayoutB, 4, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 4, + cutlass::half_t, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_align2_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 2, + cutlass::half_t, LayoutB, 2, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 2, + cutlass::half_t, LayoutC, 2, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..cc049da4919282e1bf38de2f407886d78f1bf7c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op.cu @@ -0,0 +1,919 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f32, 128x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f32, 64x64x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + 
Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32, 128x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename 
cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32, 64x64x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue 
+ >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_tensor_op_gmma_f32, 128x128x32) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_tensor_op_gmma_f32, 64x64x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_tensor_op_gmma_f32, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = 
cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_tensor_op_gmma_f32, 128x128x32) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + 
>::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_tensor_op_gmma_f32, 64x64x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f16, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f16, 128x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + 
using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f16, 64x64x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f16, 64x128x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using 
CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f16, 128x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f16, 64x64x64) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; 
+ using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_tensor_op_gmma_f16, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, 
LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_tensor_op_gmma_f16, 128x128x32) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16t_f16n_tensor_op_gmma_f16, 64x64x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + 
Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_tensor_op_gmma_f16, 64x128x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_tensor_op_gmma_f16, 128x128x32) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16n_f16n_f16n_tensor_op_gmma_f16, 64x64x64) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + cutlass::half_t, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + 
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_64>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + cutlass::half_t, cutlass::half_t, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu new file mode 100644 index 0000000000000000000000000000000000000000..1b3640b7dac2520d687adc17678b0e113412da3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu @@ -0,0 +1,663 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x2x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + 
CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x2x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + 
Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x2x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 4x1x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_4x1x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + 
Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_4x1x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 1x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_1x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_1x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + 
>::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_1x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_1x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + 
cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_warpspecialized, 64x128x64_2x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_64>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + 
>::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_load.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_load.cu new file mode 100644 index 0000000000000000000000000000000000000000..237a560dc780d515dcc45bca24e175fea7b2676f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_load.cu @@ -0,0 +1,234 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for Sm90 f16_f16_f16 with cooperative EVT epilogue + D = alpha * acc + beta * c + aux_load +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_AuxLoadF16_RowMajor) { + using LayoutA = 
cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule + >; + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t + >; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombAuxLoad< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostEVTAuxLoad< + Gemm, cutlass::half_t, cutlass::layout::RowMajor + >; + bool passed = test::gemm::device::TestAllEVT(); + EXPECT_TRUE(passed); +} + 
+TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_AuxLoadF16_ColumnMajor) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule + >; + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::ColumnMajor, cutlass::half_t + >; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombAuxLoad< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostEVTAuxLoad< + Gemm, cutlass::half_t, 
cutlass::layout::ColumnMajor + >; + bool passed = test::gemm::device::TestAllEVT(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_AuxLoadF32_ColumnMajor) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule + >; + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::ColumnMajor, float + >; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombAuxLoad< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host 
reference + using HostReference = test::gemm::device::HostEVTAuxLoad< + Gemm, float, cutlass::layout::ColumnMajor + >; + bool passed = test::gemm::device::TestAllEVT(); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu new file mode 100644 index 0000000000000000000000000000000000000000..a24d8d2b3412f753da5bd49e1fbe627a847dfc80 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu @@ -0,0 +1,517 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface with bias and elementwise epilogues. +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_ReLU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + 
using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombEltAct< + cutlass::epilogue::thread::ReLu, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + test::gemm::device::Testbed3x testbed; + bool passed = test::gemm::device::TestAll(1, 1, testbed); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU_Legacy) { +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" // Suppress deprecation warnings +#ifdef _MSC_VER +#pragma warning( push ) +#pragma warning( disable : 4996 ) +#endif // _MSC_VER + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + static constexpr bool StoreT = true; + 
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperativeBiasElementwise< + cutlass::epilogue::thread::ReLu, cutlass::half_t, cutlass::plus, StoreT, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +#ifdef _MSC_VER +#pragma warning( pop ) +#endif // _MSC_VER +#pragma GCC diagnostic pop // Re-enable deprecation warnings +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_GELU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::GELU, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool check_relative_equality = true; + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1, check_relative_equality); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU_NoStoreT) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct< + cutlass::epilogue::thread::ReLu, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + 
CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_Negate) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::negate, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = 
cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU_VoidC) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, 
cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + void, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF16_ReLU_VoidC) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, cutlass::half_t>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + void, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + 
>::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasS8_ReLU_VoidC) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, int8_t>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + void, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using 
GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_reduce.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_reduce.cu new file mode 100644 index 0000000000000000000000000000000000000000..998a2b7bc94845087cbcedf6c2cd66d54e209175 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_reduce.cu @@ -0,0 +1,201 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for Sm90 f16_f16_f16 cooperative EVT epilogue + D = row|column|scalar_reduce(alpha * acc + beta * C) +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_RowReduce) { + using LayoutA 
= cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnReduce< + cutlass::plus, cutlass::red, float, TileShape_MNK, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostReduce; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_ColumnReduce) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionCallbacks = 
cutlass::epilogue::fusion::Sm90LinCombPerRowReduce< + cutlass::plus, cutlass::red, float, TileShape_MNK, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostReduce; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_ScalarReduce) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombScalarReduce< + cutlass::plus, cutlass::red, float, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + 
cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostReduce; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_row_broadcast.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_row_broadcast.cu new file mode 100644 index 0000000000000000000000000000000000000000..06a17645ad230b129b040ec3776d990b7fc24959 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_row_broadcast.cu @@ -0,0 +1,163 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for Sm90 f16_f16_f16 cooperative EVT epilogue + D = alpha * acc + beta * C + per_column_bias +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_RowBroadcastF16) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnBias< + EpilogueDescriptor, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, 
+ cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + bool passed = test::gemm::device::TestAllEVT>(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_RowBroadcastF32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnBias< + EpilogueDescriptor, cutlass::half_t, float, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + bool passed = test::gemm::device::TestAllEVT>(); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong.cu new file mode 100644 index 0000000000000000000000000000000000000000..171d4abec6c84a2ead3e5ce339614ff68cc6251f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong.cu @@ -0,0 +1,1279 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_1x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, 
+ CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_2x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_1,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_1x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + 
cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_4x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + 
cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_1x4x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_2x4x1) { + using ElementA = cutlass::half_t; + 
using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 64x128x64_4x4x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_4,_4,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = 
typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_1x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_2x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_1,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_1x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = 
Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_4x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, 
+ cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_1x4x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = 
cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_2x4x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_persistent, 128x128x64_4x4x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using 
ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_4,_4,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f16_persistent_Epilogue, 64x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + 
using PreSwizzleLayout = Layout,Stride<_1,_64>>; + using TileShapeS2R = Shape<_64,_16>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f16_persistent_Epilogue, 128x64x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_64,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout,_64>,Stride,_64>>; + using TileShapeS2R = Shape<_128,_8>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + 
cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f16_persistent_Epilogue, 64x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout>,Stride<_64,Stride<_1,_4096>>>; + using TileShapeS2R = Shape<_8,_128>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + 
TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f16_persistent_Epilogue, 128x64x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_64,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout,Stride<_64,_1>>; + using TileShapeS2R = Shape<_16,_64>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, 
ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_persistent_Epilogue, 64x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout,Stride<_1,_64>>; + using TileShapeS2R = Shape<_64,_16>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< 
+ Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_persistent_Epilogue, 128x64x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_64,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout,_64>,Stride,_64>>; + using TileShapeS2R = Shape<_128,_8>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_persistent_Epilogue, 64x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_64,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout>,Stride<_64,Stride<_1,_4096>>>; + using TileShapeS2R = Shape<_8,_128>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_persistent_Epilogue, 128x64x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = 
float; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_64,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using PreSwizzleLayout = Layout,Stride<_64,_1>>; + using TileShapeS2R = Shape<_16,_64>; + + using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::Epilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination, + ComposedLayout, smem_ptr_flag_bits>, PreSwizzleLayout>, + Copy_Atom, + TiledCopy,Layout,Stride<_8,_1>>,TileShapeS2R>, + Copy_Atom>>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = 
cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16 / sizeof(ElementA), + ElementB, LayoutB, 16 / sizeof(ElementB), + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + KernelSchedule + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / 
sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16 / sizeof(ElementA), + ElementB, LayoutB, 16 / sizeof(ElementB), + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + KernelSchedule + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16 / sizeof(ElementA), + ElementB, LayoutB, 16 / sizeof(ElementB), + 
ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + KernelSchedule + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + using StageCountType = cutlass::gemm::collective::StageCountAuto; + using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16 / sizeof(ElementA), + ElementB, LayoutB, 16 / sizeof(ElementB), + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + KernelSchedule + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAll(1, 1)); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_aux_load.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_aux_load.cu new file mode 100644 index 0000000000000000000000000000000000000000..7679378bfc1417eaa66cdba4f6441b52d4cb41ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_aux_load.cu @@ -0,0 +1,229 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for Sm90 f16_f16_f16 with persistent EVT epilogue + D = alpha * acc + beta * c + aux_load +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_AuxLoadF16_RowMajor) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = 
cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule + >; + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t + >; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombAuxLoad< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostEVTAuxLoad; + bool passed = test::gemm::device::TestAllEVT(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_AuxLoadF16_ColumnMajor) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + 
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule + >; + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::ColumnMajor, cutlass::half_t + >; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombAuxLoad< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostEVTAuxLoad; + bool passed = test::gemm::device::TestAllEVT(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_AuxLoadF32_ColumnMajor) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using 
ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule + >; + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::ColumnMajor, float + >; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombAuxLoad< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostEVTAuxLoad; + bool passed = test::gemm::device::TestAllEVT(); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_bias_elementwise.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_bias_elementwise.cu new file mode 100644 index 0000000000000000000000000000000000000000..b3af865116d1393b804e19ac98d05ae55b3c9225 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_bias_elementwise.cu @@ -0,0 +1,463 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide persistent GEMM interface with bias and elementwise epilogues. +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_ReLU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = 
cutlass::epilogue::fusion::LinCombEltAct< + cutlass::epilogue::thread::ReLu, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + test::gemm::device::Testbed3x testbed; + bool passed = test::gemm::device::TestAll(1, 1, testbed); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF32_ReLU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, 
LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF32_GELU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::GELU, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + 
cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool check_relative_equality = true; + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1, check_relative_equality); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF32_ReLU_NoStoreT) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct< + cutlass::epilogue::thread::ReLu, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + 
+TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF32_Negate) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::negate, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF32_ReLU) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using 
FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(1, 1); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF32_ReLU_VoidC) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + 
float, float, + void, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasF16_ReLU_VoidC) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, cutlass::half_t>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + void, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + 
cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_BiasS8_ReLU_VoidC) { + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, int8_t>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + void, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +#endif // 
defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_dag.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_dag.cu new file mode 100644 index 0000000000000000000000000000000000000000..57bfe786aaf89baf9687426ad331d4e963f16743 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_dag.cu @@ -0,0 +1,170 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for Sm90 f16_f16_f16 persistent DAG epilogue + EVTDAG: D = beta * C + Graph(relu(alpha * acc + aux) + aux) + DAGEVT: EVT = alpha * acc + C, D = Graph(maximum(EVT + per-row bias, EVT)) +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_EVTDAG) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using 
ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; + + using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombEVTDAG< + EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + bool passed = test::gemm::device::TestAllEVT>(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_DAGEVT) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using 
EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; + + using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombDAGEVT< + EpilogueDescriptor, AuxStoreDescriptor, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + bool passed = test::gemm::device::TestAllEVT>(); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_reduce.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_reduce.cu new file mode 100644 index 0000000000000000000000000000000000000000..9b0c42a07614d9edd5d12fe4987c680305d6bc32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_reduce.cu @@ -0,0 +1,201 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for Sm90 f16_f16_f16 persistent EVT epilogue + D = row|column|scalar_reduce(alpha * acc + beta * C) +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_RowReduce) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = 
cutlass::epilogue::TmaWarpSpecialized; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnReduce< + cutlass::plus, cutlass::red, float, TileShape_MNK, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostReduce; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_ColumnReduce) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerRowReduce< + cutlass::plus, cutlass::red, float, TileShape_MNK, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + 
TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostReduce; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_ScalarReduce) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombScalarReduce< + cutlass::plus, cutlass::red, float, cutlass::half_t, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + 
cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostReduce; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_row_broadcast.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_row_broadcast.cu new file mode 100644 index 0000000000000000000000000000000000000000..760185bb922520abd18a9c42e4c2dbcbc1f1a2a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_row_broadcast.cu @@ -0,0 +1,163 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for Sm90 f16_f16_f16 persistent EVT epilogue + D = alpha * acc + beta * C + per_column_bias +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_RowBroadcastF16) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnBias< + EpilogueDescriptor, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + 
cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + bool passed = test::gemm::device::TestAllEVT>(); + EXPECT_TRUE(passed); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_RowBroadcastF32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnBias< + EpilogueDescriptor, cutlass::half_t, float, float, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + 
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + bool passed = test::gemm::device::TestAllEVT>(); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu new file mode 100644 index 0000000000000000000000000000000000000000..fc4e3f31614fe808420180c8ae2fd0322c7da688 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu @@ -0,0 +1,992 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with stream-K scheduling +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/kernel/tile_scheduler.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x1x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + 
cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_1x2x1) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementAccumulator = float; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x2x1 //////////////////////////////// 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, 
LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + 
>::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 4x1x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + 
+TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, 
+ cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + 
CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 1x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { + using LayoutA = 
cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; 
+ + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = 
cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< 
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + 
float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::TmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + cutlass::epilogue::TmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + 
cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::TmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, 
ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::TmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< + LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::half_t, LayoutC, 8, + cutlass::half_t, LayoutC, 8, + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, 
LayoutB, 8, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + bool passed = test::gemm::device::TestAllBiasElementwise(); + EXPECT_TRUE(passed); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_tensor_broadcast.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_tensor_broadcast.cu new file mode 100644 index 0000000000000000000000000000000000000000..5436e78bfd5682d0dd7329698cfbcfde6981f6c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_tensor_broadcast.cu @@ -0,0 +1,297 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with an elementwise tensor-tensor broadcast epilogue +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp" +#include "cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_tensor_broadcast.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f32_tensor_broadcast, 64x128x64_ActIdentity_Bin0Plus_Bin1NoOp_UnaryIdentity) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using ElementOutput = float; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + ElementOutput, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); + 
EXPECT_TRUE(!EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(!EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f32_tensor_broadcast, 64x128x64_ActReLu_Bin0Plus_Bin1Plus_UnaryNegate) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using ElementOutput = float; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + ElementOutput, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast< + ElementOutput, ElementAccumulator, ElementCompute, ElementBias, + cutlass::epilogue::thread::ReLu, + cutlass::plus, + cutlass::plus, + cutlass::negate + >, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); + EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16n_f16t_f16t_tensor_op_gmma_f32_tensor_broadcast, 64x128x64_ActReLu_Bin0Mul_Bin1Plus_UnaryNegate) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::RowMajor; + + using ElementOutput = float; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + ElementOutput, + Shape<_64,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast< + ElementOutput, ElementAccumulator, ElementCompute, ElementBias, + cutlass::epilogue::thread::ReLu, + cutlass::multiplies, + cutlass::plus, + cutlass::negate + >, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); + EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f16n_tensor_op_gmma_f32_tensor_broadcast, 
128x128x64_ActReLu_Bin0NoOp_Bin1Plus_UnaryNegate) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using ElementOutput = float; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + ElementOutput, + Shape<_128,_128,_64>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast< + ElementOutput, ElementAccumulator, ElementCompute, ElementBias, + cutlass::epilogue::thread::ReLu, + cutlass::epilogue::thread::detail::NoOp, + cutlass::plus, + cutlass::negate + >, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(!EpilogueOp::IsBinaryOp0Enabled); + EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_warpspecialized_tensor_broadcast, 64x128x64_2x2x1_ActReLu_Bin0Mul_Bin1Plus_UnaryNegate) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using ElementOutput = 
float; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::half_t, LayoutA, 8, + cutlass::half_t, LayoutB, 8, + float, + Shape<_64,_128,_64>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast< + ElementOutput, ElementAccumulator, ElementCompute, ElementBias, + cutlass::epilogue::thread::ReLu, + cutlass::multiplies, + cutlass::plus, + cutlass::negate + >, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); + EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..e4b92ff9384dd89bd18966cae476d59c01501656 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32.cu @@ -0,0 
+1,162 @@ +/*************************************************************************************************** + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/gemm/dispatch_policy.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f32t_f32n_f32n_tensor_op_gmma_f32, 64x128x32_1x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + float, LayoutA, 4, + float, LayoutB, 4, + float, + Shape<_64,_128,_128>, Shape<_1,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + 
+TEST(SM90_Device_Gemm_f32t_f32t_f32n_tensor_op_gmma_f32, 64x128x32_1x1x1_pingpong) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::gemm::EpilogueTransposed + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + float, LayoutA, 4, + float, LayoutB, 4, + float, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_f32t_f32t_f32n_tensor_op_gmma_f32, 128x128x32_1x1x1_cooperative) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::gemm::EpilogueTransposed + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + float, LayoutA, 4, + float, LayoutB, 4, + float, + Shape<_128,_128,_32>, Shape<_1,_1,_1>, + 
cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32_tensor_broadcast.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32_tensor_broadcast.cu new file mode 100644 index 0000000000000000000000000000000000000000..735d14fb90de25b01a9938cc050656608f29f6f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32_tensor_broadcast.cu @@ -0,0 +1,102 @@ +/*************************************************************************************************** + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface with an elementwise tensor-tensor broadcast epilogue +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp" +#include "cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_tensor_broadcast.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_f32t_f32n_f32n_tensor_op_gmma_f32_tensor_broadcast, 64x128x32_1x2x1_ActReLU_Bin0Mul_Bin1Plus_UnaryHardSwish) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using 
ElementOutput = float; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + float, LayoutA, 4, + float, LayoutB, 4, + float, + Shape<_64,_128,_128>, Shape<_1,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast< + ElementOutput, ElementAccumulator, ElementCompute, ElementBias, + cutlass::epilogue::thread::ReLu, + cutlass::multiplies, + cutlass::plus, + cutlass::epilogue::thread::HardSwish + >, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); + EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_bf16_tensor_op_fp32_evt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_bf16_tensor_op_fp32_evt.cu new file mode 100644 index 0000000000000000000000000000000000000000..8db3ceff6b615fe1143f0098760fbda214d50383 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_bf16_tensor_op_fp32_evt.cu @@ -0,0 +1,197 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +/*! \file + \brief Tests for Sm90 f8_f8_bf16 with EVT epilogue + ScaledLinCombPerRowBiasEltAct and ScaledLinCombPerRowBiasEltActAmaxAux +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias +// if D is fp8 +// D = scale_d * activation(Z) +// else +// D = activation(Z) +TEST(SM90_Device_Gemm_e4m3t_e4m3n_bf16t_tensor_op_gmma_f32_epilogue, 64x128x128_ScaledLinCombPerRowBiasEltAct) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_64,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltAct< + TileShape_MNK, // CtaTileShapeMNK + cutlass::epilogue::thread::ReLu, // ActivationFn + cutlass::bfloat16_t, // ElementOutput + float, // ElementCompute + cutlass::bfloat16_t // ElementBias + >; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::bfloat16_t, LayoutC, 8, + cutlass::bfloat16_t, LayoutC, 8, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltAct< + Gemm, cutlass::epilogue::thread::ReLu, cutlass::bfloat16_t + >; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias +// if D is fp8 +// amax_d = max(abs(elements in activation(Z))) +// D = scale_d * activation(Z) +// else +// D = activation(Z) +// if Aux is fp8 +// amax_aux = max(abs(elements in Z)) +// Aux = scale_aux * Z +// else +// Aux = Z +TEST(SM90_Device_Gemm_e4m3t_e4m3n_bf16n_tensor_op_gmma_f32_epilogue, 64x128x128_4x1x1_ScaledLinCombPerRowBiasEltActAmaxAux) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_64,_128,_128>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + using EpilogueDescriptor = 
cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::bfloat16_t, cutlass::bfloat16_t, EpilogueSchedule>; + using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::bfloat16_t>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltActAmaxAux< + TileShape_MNK, // CtaTileShapeMNK + typename EpilogueDescriptor::EpilogueTile, // EpilogueTile + EpilogueDescriptor::StagesD, // StagesD + typename AuxStoreDescriptor::Stride, // StrideAux + typename AuxStoreDescriptor::SmemLayoutAtom, // SmemLayoutAtom + typename AuxStoreDescriptor::CopyOpR2S, // CopyOpR2S + cutlass::epilogue::thread::ReLu, // ActivationFn + cutlass::bfloat16_t, // ElementOutput + float, // ElementCompute + cutlass::bfloat16_t, // ElementBias + float // ElementScalar + >; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::bfloat16_t, LayoutC, 16, + cutlass::bfloat16_t, LayoutC, 16, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltActAmaxAux< + Gemm, cutlass::epilogue::thread::ReLu, cutlass::bfloat16_t + >; + bool passed = 
test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative.cu new file mode 100644 index 0000000000000000000000000000000000000000..6fd664b98553808fcfcbad7e1015ec341a19ee3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative.cu @@ -0,0 +1,533 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + + /*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 128x128x128_1x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 256x128x128_1x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / 
sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x1x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 128x128x128_1x2x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + 
ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 256x128x128_1x2x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + 
ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 1x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 128x128x128_1x4x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, 
+ ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 256x128x128_1x4x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + 
CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 4x1x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 128x128x128_4x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + 
EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 256x128x128_4x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 
2x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 128x128x128_2x4x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative, 256x128x128_2x4x1_fp8_fast_accum) { + using ElementA = 
cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative_evt.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative_evt.cu new file mode 100644 index 0000000000000000000000000000000000000000..d6f5e272bbace6544e22653ae8c3820f90a92882 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cluster_warpspecialized_cooperative_evt.cu @@ -0,0 +1,197 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Tests for Sm90 f8_f8_f32 with EVT epilogue + ScaledLinCombPerRowBiasEltAct and ScaledLinCombPerRowBiasEltActAmaxAux +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias +// if D is fp8 +// D = scale_d * activation(Z) +// else +// D = activation(Z) +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 128x128x128_1x4x1_ScaledLinCombPerRowBiasEltAct) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = 
cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltAct< + TileShape_MNK, // CtaTileShapeMNK + cutlass::epilogue::thread::ReLu, // ActivationFn + float, // ElementOutput + float, // ElementCompute + float // ElementBias + >; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltAct< + Gemm, cutlass::epilogue::thread::ReLu, float + >; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias +// if D is fp8 +// amax_d = max(abs(elements in activation(Z))) +// D = scale_d * activation(Z) +// else +// D = activation(Z) +// if Aux is fp8 +// amax_aux = max(abs(elements in Z)) +// Aux = scale_aux * Z +// else +// Aux = Z 
+TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x128_1x2x1_ScaledLinCombPerRowBiasEltActAmaxAux) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, float, float, EpilogueSchedule>; + using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, float>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltActAmaxAux< + TileShape_MNK, // CtaTileShapeMNK + typename EpilogueDescriptor::EpilogueTile, // EpilogueTile + EpilogueDescriptor::StagesD, // StagesD + typename AuxStoreDescriptor::Stride, // StrideAux + typename AuxStoreDescriptor::SmemLayoutAtom, // SmemLayoutAtom + typename AuxStoreDescriptor::CopyOpR2S, // CopyOpR2S + cutlass::epilogue::thread::ReLu, // ActivationFn + float, // ElementOutput + float, // ElementCompute + float, // ElementBias + float // ElementScalar + >; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + TileShape_MNK, ClusterShape_MNK, + 
cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltActAmaxAux< + Gemm, cutlass::epilogue::thread::ReLu, float + >; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cooperative_stream_k.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cooperative_stream_k.cu new file mode 100644 index 0000000000000000000000000000000000000000..45b9d023f104dd854570761e6119193c93aee3db --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_f32_cooperative_stream_k.cu @@ -0,0 +1,544 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + + /*! 
\file + \brief Tests for device-wide GEMM interface with stream-K interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/kernel/tile_scheduler.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x128_1x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + 
ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x128_1x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = 
cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x1x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x128_1x2x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + 
+ using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x128_1x2x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_1,_2,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 1x4x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x128_1x4x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x128_1x4x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_1,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// 
+/////////////////////////////// Cluster 4x1x1 //////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x128_4x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + 
+TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x128_4x1x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_4,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////// Cluster 2x4x1 //////////////////////////////// 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x128_2x4x1) { + using ElementA = cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_128,_128,_128>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x128_2x4x1_fp8_fast_accum) { + using ElementA = 
cutlass::float_e4m3_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::float_e4m3_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using TileShape_MNK = Shape<_256,_128,_128>; + using ClusterShape_MNK = Shape<_2,_4,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, ElementC, ElementAccumulator, ElementAccumulator>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + ElementAccumulator, ElementAccumulator, + ElementC, LayoutC, 16 / sizeof(ElementC), + ElementC, LayoutC, 16 / sizeof(ElementC), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, LayoutA, 16, + ElementB, LayoutB, 16, + ElementAccumulator, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp, + cutlass::gemm::StreamKScheduler + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_fp32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_fp32.cu new file mode 100644 index 
0000000000000000000000000000000000000000..38cc6a6d582c83f72838dfb7411fefa9ea67a451 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f32_tensor_op_fp32.cu @@ -0,0 +1,554 @@ + +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + + /*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, 
Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_bias_f32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// FP32 = e5m2 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e5m2t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e5m2_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e5m2 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM90_Device_Gemm_e4m3t_e5m2n_f32n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e5m2_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x2x1 ////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = 
cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 1x4x1 ////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_1x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 4x1x1 ////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / 
sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x4x1 ////////////////////////////////// +///////////////////////////// FP32 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, float, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 16 / sizeof(float), + float, LayoutC, 16 / sizeof(float), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + 
cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +//////////////////////////////// TMA epilogue ///////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_tma_epilogue) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32t_tensor_op_gmma_f32, 64x128x128_tma_epilogue) { + using LayoutA 
= cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32n_tensor_op_gmma_f32, 64x128x128_tma_epilogue_fp8_fast_accum) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + 
cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_f32t_tensor_op_gmma_f32, 64x128x128_tma_epilogue_fp8_fast_accum) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu new file mode 100644 index 0000000000000000000000000000000000000000..eb60ad95b40ead04fc203fbb314a1f937a1a5faf --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu @@ -0,0 +1,1221 @@ + +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + + /*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; +/////////////////////////////////////////////////////////////////////////////// +//////////////////////////////// output: E4M3 ///////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / 
sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e5m2 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e5m2t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e5m2_t, LayoutA, 
16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e5m2 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e5m2_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = 
cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x2x1 ////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 1x4x1 ////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_1x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// 
+///////////////////////////////// Cluster 4x1x1 ////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x4x1 ////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +//////////////////////////////// output: E5M2 ///////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// 
+/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e5m2 = e5m2 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e5m2t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + 
using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e5m2_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e5m2 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e5m2n_e5m2n_tensor_op_gmma_f32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, 
cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e5m2_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x2x1 ////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, 
cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_2,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 1x4x1 ////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_1x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, 
LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 4x1x1 ////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_4x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_4,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + 
>::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_4,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x4x1 ////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + 
cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x4x1 ////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_persistent) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + 
cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// Cluster 2x4x1 ////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_non_warpspecialized) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = 
cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + + + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////// output: E4M3 + Aux Tensor /////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_e4m3) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux< + LayoutC, cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, cutlass::float_e4m3_t>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + 
>::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +////////////////////////////////// FP8 Accum ///////////////////////////////// +///////////////////////////// e5m2 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_persistent_fp8_fast_accum) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using 
Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_fp8_fast_accum) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_2,_4,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + + +/////////////////////////////////////////////////////////////////////////////// +////////////////////////// output: E4M3 + Bias /////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + 
+/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_bias_bf16) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, cutlass::bfloat16_t>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + + +/////////////////////////////////////////////////////////////////////////////// +////////////////////////// output: E4M3 + Bias + Relu //////////////////////// 
+/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e4m3 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_bias_bf16_relu) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct< + cutlass::epilogue::thread::ReLu, cutlass::float_e4m3_t, float, cutlass::bfloat16_t>; + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////// output: E4M3 + Aux Tensor + 
Bias///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e5m2 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_bias_f16) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux< + LayoutC, cutlass::epilogue::thread::Identity, + cutlass::float_e4m3_t, // ElementOutput + float, // ElementCompute + cutlass::half_t, // ElementAux + float, // ElementAmax + cutlass::half_t>; // ElementBias + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e5m2_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////// output: E4M3 + Aux Tensor + Bias + Relu///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e5m2 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_relu) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux< + LayoutC, cutlass::epilogue::thread::ReLu, + cutlass::float_e4m3_t, // ElementOutput + float, // ElementCompute + cutlass::half_t, // ElementAux + float, // ElementAmax + float>; // ElementBias + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e5m2_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = 
cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// e4m3 = e4m3 * e5m2 (TN) ///////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_bias_f16_relu) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux< + LayoutC, cutlass::epilogue::thread::ReLu, + cutlass::float_e4m3_t, // ElementOutput + float, // ElementCompute + cutlass::half_t, // ElementAux + float, // ElementAmax + cutlass::half_t>; // ElementBias + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t), + EpilogueSchedule, + FusionOperation + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e5m2_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using 
Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise()); +} + +/////////////////////////////////////////////////////////////////////////////// +//////////////////////////////// TMA epilogue ///////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_tma_epilogue) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16, + cutlass::float_e4m3_t, LayoutC, 16, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3t_tensor_op_gmma_f32, 64x128x128_tma_epilogue) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + 
cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16, + cutlass::float_e4m3_t, LayoutC, 16, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32_evt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32_evt.cu new file mode 100644 index 0000000000000000000000000000000000000000..21871e0c12534c1fd08e3fce277852e2f41fa0f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32_evt.cu @@ -0,0 +1,197 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Tests for Sm90 f8_f8_bf16 with EVT epilogue + ScaledLinCombPerRowBiasEltAct and ScaledLinCombPerRowBiasEltActAmaxAux +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_evt.hpp" +#include "sm90_evt_operations.hpp" + + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias +// if D is fp8 +// D = scale_d * activation(Z) +// else +// D = activation(Z) +TEST(SM90_Device_Gemm_f8t_f8n_f8t_tensor_op_gmma_f32_persistent_epilogue, 64x128x128_1x1x1_ScaledLinCombPerRowBiasEltAct) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_64,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltAct< + TileShape_MNK, // CtaTileShapeMNK + cutlass::epilogue::thread::ReLu, // ActivationFn + cutlass::float_e4m3_t, // ElementOutput + float, // ElementCompute + cutlass::float_e4m3_t // ElementBias + >; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, 
ClusterShape_MNK, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + cutlass::float_e4m3_t, LayoutC, 16, + cutlass::float_e4m3_t, LayoutC, 16, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltAct< + Gemm, cutlass::epilogue::thread::ReLu, cutlass::float_e4m3_t + >; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} + +// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias +// if D is fp8 +// amax_d = max(abs(elements in activation(Z))) +// D = scale_d * activation(Z) +// else +// D = activation(Z) +// if Aux is fp8 +// amax_aux = max(abs(elements in Z)) +// Aux = scale_aux * Z +// else +// Aux = Z +TEST(SM90_Device_Gemm_f8t_f8n_f8t_tensor_op_gmma_f32_persistent_epilogue, 64x128x128_1x1x1_ScaledLinCombPerRowBiasEltActAmaxAux) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + using TileShape_MNK = Shape<_64,_128,_128>; + using ClusterShape_MNK = Shape<_1,_1,_1>; + + using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized; + using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; + using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape_MNK, EpilogueTileType, cutlass::float_e4m3_t, 
cutlass::float_e4m3_t, EpilogueSchedule>; + using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor< + EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::float_e4m3_t>; + + using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltActAmaxAux< + TileShape_MNK, // CtaTileShapeMNK + typename EpilogueDescriptor::EpilogueTile, // EpilogueTile + EpilogueDescriptor::StagesD, // StagesD + typename AuxStoreDescriptor::Stride, // StrideAux + typename AuxStoreDescriptor::SmemLayoutAtom, // SmemLayoutAtom + typename AuxStoreDescriptor::CopyOpR2S, // CopyOpR2S + cutlass::epilogue::thread::ReLu, // ActivationFn + cutlass::float_e4m3_t, // ElementOutput + float, // ElementCompute + cutlass::float_e4m3_t, // ElementBias + float // ElementScalar + >; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + TileShape_MNK, ClusterShape_MNK, + EpilogueTileType, + float, float, + cutlass::float_e4m3_t, LayoutC, 16, + cutlass::float_e4m3_t, LayoutC, 16, + EpilogueSchedule, + FusionCallbacks + >::CollectiveOp; + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::float_e4m3_t, LayoutA, 16, + cutlass::float_e4m3_t, LayoutB, 16, + float, + TileShape_MNK, ClusterShape_MNK, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Host reference + using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltActAmaxAux< + Gemm, cutlass::epilogue::thread::ReLu, cutlass::float_e4m3_t + >; + bool passed = test::gemm::device::TestAllEVT(true); + EXPECT_TRUE(passed); +} +#endif // 
defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32.cu new file mode 100644 index 0000000000000000000000000000000000000000..eca11ae2ddab2f33d87d0ca94684f0fb868de954 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32.cu @@ -0,0 +1,418 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + 
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32, 64x128x128_1x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_64,_128,_128>, Shape<_1,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32, 128x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = 
cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32, 128x128x128_1x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_1,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = 
cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32, 128x128x128_2x1x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_2,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32, 128x128x128_2x2x1) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32_pingpong_epilogue, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8t_tensor_op_gmma_s32_pingpong_epilogue, 64x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using CollectiveEpilogue = typename 
cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::TmaWarpSpecialized + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_64,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedPingpong + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32_cooperative_epilogue, 128x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::TmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = 
cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_s8t_s8n_s8t_tensor_op_gmma_s32_cooperative_epilogue, 128x128x128) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + int32_t, int32_t, + int8_t, LayoutC, 16, + int8_t, LayoutC, 16, + cutlass::epilogue::TmaWarpSpecializedCooperative + >::CollectiveOp; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAutoCarveout, + cutlass::gemm::KernelTmaWarpSpecializedCooperative + >::CollectiveOp; + + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu new file mode 100644 index 0000000000000000000000000000000000000000..a1f352d64d09c095a085530e9cc0a3666739524d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu @@ -0,0 +1,102 @@ 
+/*************************************************************************************************** + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface with an elementwise tensor-tensor broadcast epilogue +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp" +#include "cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x_tensor_broadcast.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32_tensor_broadcast, 128x128x128_2x2x1_ActReLU_Bin0Mul_Bin1Plus_UnaryHardSwish) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using ElementOutput = int32_t; + using ElementAccumulator = ElementOutput; + using ElementCompute = ElementOutput; + using ElementBias = ElementOutput; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + int8_t, LayoutA, 16, + int8_t, LayoutB, 16, + int32_t, + Shape<_128,_128,_128>, Shape<_2,_2,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< + cutlass::epilogue::collective::EpilogueTensorBroadcast< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombinationTensorBroadcast< + ElementOutput, ElementAccumulator, ElementCompute, ElementBias, + cutlass::epilogue::thread::ReLu, + 
cutlass::multiplies, + cutlass::plus, + cutlass::epilogue::thread::HardSwish + >, + cutlass::gemm::EpilogueDefault>>; + + EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); + EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); + EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + EpilogueOp + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_stream_k_scheduler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_stream_k_scheduler.cu new file mode 100644 index 0000000000000000000000000000000000000000..989d60aab79c9bc8194edbdbd14d13374c31a4e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_stream_k_scheduler.cu @@ -0,0 +1,327 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests that the stream-K scheduler covers the entire problem space. +*/ + +#include "cutlass/cluster_launch.hpp" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp" +#include "cutlass/util/device_memory.h" +#include "cutlass/util/reference/device/tensor_fill.h" + +#include "../../common/cutlass_unit_test.h" + +// Grids are launched with clusters enabled in these tests, +// so the CTK version must support cluster launching. +#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED) + +using namespace cute; +using ProblemShape_MNKL = Shape; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel for getting each piece of work for a given block from the scheduler and logging +/// the K iterations visited by the block. 
+template < + class Scheduler, + class TileShape, + class ClusterShape +> +__global__ +void +run_scheduler(int* visit_counters, typename Scheduler::Params params, TileShape tile_shape, ClusterShape cluster_shape, ProblemShape_MNKL problem_shape_mnkl) { + Scheduler scheduler{params}; + auto work_tile_info = scheduler.get_current_work(); + + while (work_tile_info.is_valid_tile) { + // Increment counters to indicate coverage + auto tile_idx = Scheduler::output_tile_index(params, work_tile_info); + auto offset = tile_idx * params.k_tiles_per_output_tile_ + work_tile_info.K_idx; + for (auto i = 0; i < work_tile_info.k_tile_count; ++i) { + // Use atomicAdd because the visit counters are shared by multiple thread blocks. + // While having more than one block increment the same counter indicates failure, + // we need to ensure that this behavior is captured (by having both increments reflected). + atomicAdd(visit_counters + offset + i, 1); + } + + bool continue_current = scheduler.continue_current_work(work_tile_info); + if (!continue_current) { + scheduler.advance_to_next_work(); + work_tile_info = scheduler.get_current_work(); + } + } +} + +/// Host-side wrapper for launching the kernel to test the scheduler. +template < + class TileShape, + class ClusterShape, + uint32_t NumMmaWarpGroups = 2 +> +bool +test_scheduler( + ProblemShape_MNKL problem_shape_mnkl, + TileShape tile_shape, + ClusterShape cluster_shape, + int sm_count, + int splits=1, + bool expect_data_parallel=false) { + + using Scheduler = cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamK; + + cutlass::KernelHardwareInfo hw_info{0, sm_count}; + auto params = Scheduler::to_underlying_arguments(problem_shape_mnkl, tile_shape, cluster_shape, hw_info, {splits}, nullptr); + + // If we expect the schedule to be data-parallel only, ensure that no stream-K tiles are launched. 
+ if (expect_data_parallel && params.sk_tiles_ != 0) { + return false; + } + + // Allocate counters indicating the number of times each k iteration of each output tile has been visited + auto [blk_m, blk_n, blk_l] = Scheduler::get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + auto total_counters = blk_m * blk_n * blk_l * params.k_tiles_per_output_tile_; + cutlass::DeviceAllocation visit_counters(total_counters); + + // Initialize counters to zero + cudaError_t err = cudaMemset((void*)visit_counters.get(), 0, sizeof(int) * total_counters); + if (err != cudaSuccess) { + std::cerr << __FILE__ << ":" << __LINE__ << " cudaMemset failed with error: " << cudaGetErrorString(err) << std::endl; + return false; + } + + typename Scheduler::Arguments args{}; + + // Set up the grid for the problem + dim3 grid = Scheduler::get_grid_shape(problem_shape_mnkl, tile_shape, cluster_shape, hw_info, args); + + // Set up cluster and cluster launch. This is needed even for this simple kernel because + // the SM90 scheduler needs to be able to query the CTA id within a cluster, which requires + // explicitly launching with clusters. 
+ dim3 cluster{ + static_cast(cute::get<0>(ClusterShape{})), + static_cast(cute::get<1>(ClusterShape{})), + static_cast(cute::get<2>(ClusterShape{})) + }; + + cudaLaunchConfig_t launch_config; + launch_config.gridDim = grid; + launch_config.blockDim = {1, 1, 1}; + launch_config.dynamicSmemBytes = 0; + launch_config.stream = NULL; + + cudaLaunchAttribute launch_attribute[1]; + launch_attribute[0].id = cudaLaunchAttributeClusterDimension; + launch_attribute[0].val.clusterDim.x = cluster.x; + launch_attribute[0].val.clusterDim.y = cluster.y; + launch_attribute[0].val.clusterDim.z = cluster.z; + + launch_config.attrs = launch_attribute; + launch_config.numAttrs = 1; + + void const* kernel = (void const*) run_scheduler; + int* counters_ptr = visit_counters.get(); + void* kernel_params[] = { + &counters_ptr, + ¶ms, + &tile_shape, + &cluster_shape, + &problem_shape_mnkl + }; + + // Run the scheduler to completion and log visits to each k iteration + err = cudaLaunchKernelExC(&launch_config, kernel, kernel_params); + + if (err != cudaSuccess) { + std::cerr << __FILE__ << ":" << __LINE__ + << " cudaLaunchKernelExC failed with error: " + << cudaGetErrorString(err) << std::endl; + return false; + } + + err = cudaDeviceSynchronize(); + if (err != cudaSuccess) { + std::cerr << __FILE__ << ":" << __LINE__ + << " scheduler kernel failed with error: " + << cudaGetErrorString(err) << std::endl; + return false; + } + + // Copy visit counts back to host and ensure that all entries are ones + std::vector host_visit_counts(total_counters); + visit_counters.copy_to_host(host_visit_counts.data()); + + for (size_t i = 0; i < host_visit_counts.size(); ++i) { + if (host_visit_counts[i] != 1) { + // for (int count : host_visit_counts) { + // if (count != 1) { + std::cout << "Failed with problem size " + << size<0>(problem_shape_mnkl) << "x" + << size<1>(problem_shape_mnkl) << "x" + << size<2>(problem_shape_mnkl) << "x" + << size<3>(problem_shape_mnkl) + << " and grid size " << grid.x << "x" 
+ << grid.y << "x" << grid.z + << " splits=" << params.splits_ + << " k_iter=" << params.k_tiles_per_output_tile_ + << " big_units=" << params.big_units_ + << " sk_tiles=" << params.sk_tiles_ + << " sk_units=" << params.sk_units_ + << " k_tiles_per_sk_unit=" << params.k_tiles_per_sk_unit_ << std::endl; + std::cout << "Error at idx: " << i << ". Got count " << host_visit_counts[i] << std::endl; + return false; + } + } + + return true; +} + +/// Executes tests of the scheduler with a sweep across problem size K +template < + class TileShape, + class ClusterShape +> +bool sweep_k( + ProblemShape_MNKL problem_shape_mnkl, + TileShape tile_shape, + ClusterShape cluster_shape, + int sm_count, + int splits=1, + bool expect_data_parallel=false, + int k_start=128, + int k_stop=16384, + int k_step=0) { + + if (k_step == 0) { + k_step = 4 * cute::size<2>(tile_shape); + } + + for (int k = k_start; k <= k_stop; k += k_step) { + ProblemShape_MNKL problem{get<0>(problem_shape_mnkl), get<1>(problem_shape_mnkl), k, get<3>(problem_shape_mnkl)}; + bool passed = test_scheduler(problem, tile_shape, cluster_shape, sm_count, splits, expect_data_parallel); + if (!passed) { + return false; + } + } + + return true; +} + +/// Executes tests of the scheduler that are expected to result in a data-parallel schedule. +/// This function assumes that the problem, tile, and cluster shape, alongside the SM count, +/// are such that the problem executes only full waves on the device. +template < + class TileShape, + class ClusterShape +> +bool test_data_parallel( + int blocks_m, + int blocks_n, + TileShape tile_shape, + ClusterShape cluster_shape, + int sm_count) { + + // Since the configuration passed in executes only full waves, increasing + // the batch dimension simply results in running more full waves. 
+ for (int l = 1; l < 4; ++l) { + ProblemShape_MNKL problem_shape{ + size<0>(tile_shape) * blocks_m, size<1>(tile_shape) * blocks_n, 1, l}; + bool passed = sweep_k(problem_shape, tile_shape, cluster_shape, sm_count, /*splits=*/1, /*expect_data_parallel=*/true); + + if (!passed) { + return false; + } + } + return true; +} + +/// Executes tests of the scheduler on the generic stream-K decomposition. +template < + class TileShape, + class ClusterShape +> +bool test_stream_k( + TileShape tile_shape, + ClusterShape cluster_shape, + int sm_count) { + + int tile_m = size<0>(tile_shape); + int tile_n = size<1>(tile_shape); + + for (int m_blocks = 1; m_blocks <= 24; ++m_blocks) { + for (int n_blocks = 1; n_blocks <= 24; ++n_blocks) { + for (int l = 1; l < 4; ++l) { + ProblemShape_MNKL problem{m_blocks * tile_m, n_blocks * tile_n, 1, l}; + if (!sweep_k(problem, tile_shape, cluster_shape, sm_count)) { + return false; + } + } + } + } + + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_stream_k_scheduler, 256x128x64_2x1x1) { + using TileShape_MNK = Shape<_256,_128,_64>; + using ClusterShape_MNK = Shape<_2,_1,_1>; + + TileShape_MNK tile_shape; + ClusterShape_MNK cluster_shape; + + // Test various data-parallel cases + EXPECT_TRUE(test_data_parallel(/*blocks_m=*/ 4, /*blocks_n=*/ 4, tile_shape, cluster_shape, /*sm_count=*/ 16)); + EXPECT_TRUE(test_data_parallel(/*blocks_m=*/16, /*blocks_n=*/ 4, tile_shape, cluster_shape, /*sm_count=*/ 64)); + EXPECT_TRUE(test_data_parallel(/*blocks_m=*/ 4, /*blocks_n=*/27, tile_shape, cluster_shape, /*sm_count=*/108)); + + // Test various stream-K cases + EXPECT_TRUE(test_stream_k(tile_shape, cluster_shape, /*sm_count=*/ 16)); + EXPECT_TRUE(test_stream_k(tile_shape, cluster_shape, /*sm_count=*/ 64)); + EXPECT_TRUE(test_stream_k(tile_shape, cluster_shape, /*sm_count=*/108)); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_stream_k_scheduler, 128x128x64_2x1x1) { + using TileShape_MNK = Shape<_128,_128,_64>; + using ClusterShape_MNK = Shape<_2,_1,_1>; + + TileShape_MNK tile_shape; + ClusterShape_MNK cluster_shape; + + EXPECT_TRUE(test_scheduler({128, 512, 2048, 1}, tile_shape, cluster_shape, 114)); +} + +#endif // defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_alignx_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_alignx_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..bb25de29b2d190deaee1d31e32d272c7597b8cb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_alignx_tensor_op_f32.cu @@ -0,0 +1,167 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_align4_tensor_op_gmma_f32, 64x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + tfloat32_t, LayoutA, 4, + tfloat32_t, LayoutB, 4, + float, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelMultistage + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::NoSmemWarpSpecialized + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_align2_tensor_op_gmma_f32, 
64x64x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::tfloat32_t, LayoutA, 2, + cutlass::tfloat32_t, LayoutB, 2, + float, + Shape<_64,_64,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_64,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 2, + float, LayoutC, 2, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_align1_tensor_op_gmma_f32, 128x64x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::tfloat32_t, LayoutA, 1, + cutlass::tfloat32_t, LayoutB, 1, + float, + Shape<_128,_64,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_128,_64,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 1, + float, LayoutC, 1, + 
cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32.cu new file mode 100644 index 0000000000000000000000000000000000000000..bc31d24a6893d114eb2ebcdd864403761fa8efd6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32.cu @@ -0,0 +1,206 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cute/tensor.hpp" +#include "cute/atom/mma_atom.hpp" + +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/collective_builder.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "../../common/cutlass_unit_test.h" + +#include "gemm_testbed_3x.hpp" + +#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) + +using namespace cute; + +TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_f32, 64x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::tfloat32_t, LayoutA, 
4, + cutlass::tfloat32_t, LayoutB, 4, + float, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_f32, 64x128x32) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::tfloat32_t, LayoutA, 1, + cutlass::tfloat32_t, LayoutB, 4, + float, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + 
EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_f32, 64x128x32) { + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::tfloat32_t, LayoutA, 1, + cutlass::tfloat32_t, LayoutB, 1, + float, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 1, + float, LayoutC, 1, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_f32, 64x128x32) { + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + cutlass::tfloat32_t, LayoutA, 4, + cutlass::tfloat32_t, LayoutB, 1, + float, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = 
typename cutlass::epilogue::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + Shape<_64,_128,_32>, Shape<_1,_1,_1>, + cutlass::epilogue::collective::EpilogueTileAuto, + float, float, + float, LayoutC, 4, + float, LayoutC, 4, + cutlass::epilogue::collective::EpilogueScheduleAuto + >::CollectiveOp; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveOp, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + EXPECT_TRUE(test::gemm::device::TestAll()); +} + +/////////////////////////////////////////////////////////////////////////////// + +#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..d386a7e7c06c1a26ad3013371941f877ed695280 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_f32_ls_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_ls_l_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_ls_u_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + 
cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_ls_u_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_f32_rs_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..07f8564c0787c1864ddb26305a350e287642102a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_f32_rs_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_rs_l_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_rs_u_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_rs_u_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_fast_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_fast_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..3ad96e428bf16ccf33d467963feef966a8e427b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_fast_f32_ls_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_ls_l_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_ls_u_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + 
cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_ls_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_fast_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_fast_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4eb8b7afccaff0e48934b90d01dd9083624a66fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf32n_cf32n_tensor_op_fast_f32_rs_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_rs_l_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_rs_u_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf32n_cf32n_rs_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64_cf64_cf64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..904b4bf39c484157c93483d68a3eb30e4a8561f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,132 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Symm_cf64n_cf64n_ls_l_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Symm_cf64n_cf64n_rs_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, 
+ cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..fa2f57409d1284feed55a02979abd5c78e03224a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_gaussian_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_ls_l_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_ls_u_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + 
cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_ls_u_tensor_op_f64_gaussian, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..3dd4edd2fd04e3fb78303a4b6332865e2aa81301 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_ls_f64_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_ls_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + 
ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_ls_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_ls_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_rs_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_rs_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..af810d4a4f577942c7989e457e90c6b3fae11316 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_cf64n_cf64n_cf64n_tensor_op_rs_f64_sm80.cu @@ -0,0 +1,172 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_rs_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + 
cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_cf64n_cf64n_rs_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Symm = cutlass::gemm::device::Symm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..6cdc04df7ad6a55c3dfd11572e7dc867319707cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu @@ -0,0 +1,489 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Symm_{ElementA/B}{LayoutA/B}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_fast_f32_align1_align1, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + 
cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_fast_f32_align1_align4, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_fast_f32_align1_align4, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + 
cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_fast_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_fast_f32_align1_align4, 128x256x32_64x64x32) { + + using ElementOutput = float; + 
using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_fast_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_fast_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_fast_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_fast_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_fast_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + 
cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_fast_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..1ae9cf92130c8d049d6126b660831e7ca132548e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_rs_sm80.cu @@ -0,0 +1,276 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Symm_{ElementA/B}{LayoutA/B}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_fast_f32_align1_align1, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_fast_f32_align1_align1, 128x64x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_l_tensor_op_fast_f32_align1_align1, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_fast_f32_align1_align4, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + 
cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_fast_f32_align1_align4, 128x64x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_l_tensor_op_fast_f32_align1_align4, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + 
cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64_f64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64_f64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..fff373e23dcf023763bc20a2361a56b6bf58d779 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64_f64_tensor_op_f64_sm90.cu @@ -0,0 +1,135 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Symm_f64n_f64n_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + 
cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Symm_f64t_f64t_ls_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64n_tensor_op_f64_ls_sm80.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64n_tensor_op_f64_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..abb5020f952e40c4325374036f1ab7bd4165c794 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64n_tensor_op_f64_ls_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_ls_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_ls_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_ls_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_ls_u_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_ls_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, 
+ cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64n_tensor_op_f64_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64n_tensor_op_f64_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..57c4f79fc727587978a179e4d6e718adfbc3c244 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64n_tensor_op_f64_rs_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_rs_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = 
cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_rs_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_rs_u_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = 
cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64n_rs_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64t_tensor_op_f64_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64t_tensor_op_f64_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..6c82d7683978a518c04f2697e6757f7a41bfa6e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64t_tensor_op_f64_ls_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_ls_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_ls_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_ls_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_ls_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_ls_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + 
cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64t_tensor_op_f64_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64t_tensor_op_f64_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7a44f6064f49dbad42fc908fa25fa300ef96a38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64n_f64t_tensor_op_f64_rs_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_rs_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = 
cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_rs_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_rs_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = 
cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64n_f64t_rs_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// 
+#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64n_tensor_op_f64_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64n_tensor_op_f64_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..64f4078c12b9f11cc1bfb1c15ed0f1ff0a7eacfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64n_tensor_op_f64_ls_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_ls_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_ls_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_ls_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_ls_u_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_ls_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + 
cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64n_tensor_op_f64_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64n_tensor_op_f64_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..21cf9fdc1694536e7bc392cd82a8e13fe0179a5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64n_tensor_op_f64_rs_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_rs_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = 
cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_rs_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_rs_u_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; 
+ using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64n_rs_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if 
defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64t_tensor_op_f64_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64t_tensor_op_f64_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..9fdd1a040b1436e6f9fdab441bf1ebdb36154d42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64t_tensor_op_f64_ls_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_ls_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_ls_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_ls_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, 
+ cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_ls_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_ls_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kLeft, + 
cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64t_tensor_op_f64_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64t_tensor_op_f64_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..fce589d75cd2c6b3fcccdbef9bf968e2d468465f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_f64t_f64t_tensor_op_f64_rs_sm80.cu @@ -0,0 +1,258 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_rs_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_rs_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = 
cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_rs_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_rs_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + 
using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f64t_f64t_rs_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Symm = cutlass::gemm::device::Symm< + ElementA, + LayoutA, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if 
defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32n_f32n_tensor_op_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32n_f32n_tensor_op_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..7e6e4b42071d830689c87b4a93567ad29f70510d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32n_f32n_tensor_op_f32_ls_sm80.cu @@ -0,0 +1,489 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Symm_{ElementA/B}{LayoutA/B}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_f32_align1_align1, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + 
float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_f32_align1_align4, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_f32_align1_align4, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + 
cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_f32_align1_align4, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + 
float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_l_tensor_op_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_ls_u_tensor_op_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32n_f32n_tensor_op_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32n_f32n_tensor_op_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb9bf2e5ef12d6ee29445718e6b961fa613f4470 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32n_f32n_tensor_op_f32_rs_sm80.cu @@ -0,0 +1,276 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Symm_{ElementA/B}{LayoutA/B}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_f32_align1_align1, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_f32_align1_align1, 
128x64x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_l_tensor_op_f32_align1_align1, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_f32_align1_align4, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, 
+ float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_u_tensor_op_f32_align1_align4, 128x64x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32n_f32n_rs_l_tensor_op_f32_align1_align4, 64x128x32_32x64x32) { + +using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + 
float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32t_f32t_tensor_op_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32t_f32t_tensor_op_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..a80084c0562c0d705eec4a8ba8259fe33eb34a83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/symm_tf32t_f32t_tensor_op_f32_ls_sm80.cu @@ -0,0 +1,489 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/symm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_symm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Symm_{ElementA/B}{LayoutA/B}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// 
+TEST(SM80_Device_Symm_f32t_f32t_ls_l_tensor_op_f32_align1_align1, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_l_tensor_op_f32_align1_align4, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_l_tensor_op_f32_align1_align4, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_l_tensor_op_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_l_tensor_op_f32_align1_align4, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_l_tensor_op_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, 
+ false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_u_tensor_op_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_u_tensor_op_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_u_tensor_op_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_u_tensor_op_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / 
cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Symm_f32t_f32t_ls_u_tensor_op_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Symm = cutlass::gemm::device::Symm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32n_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32n_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..b559945eab1854bb337f8270175e7d1b4e7376a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32n_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,150 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_cf32n_cf32n_l_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Syr2k_cf32n_cf32n_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..7090a0a64b546782473ee0d55c38e7e14927f36c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32t_tensor_op_f32_sm80.cu @@ -0,0 +1,150 @@ +/*************************************************************************************************** + * 
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_cf32n_cf32t_l_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Syr2k_cf32n_cf32t_u_tensor_op_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32t_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32t_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..0c6efb1e662cdc0780559f0c96bcb14d19fecbd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf32n_cf32t_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,150 @@ +/*************************************************************************************************** + * 
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_cf32n_cf32t_l_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Syr2k_cf32n_cf32t_u_tensor_op_fast_f32, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64_cf64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb9f419ded7c7ed8804e68c82a9f6af05f32b8ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,149 @@ +/*************************************************************************************************** + * 
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syr2k_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM90_Device_Syr2k_cf64n_cf64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..cea16919f79570f410a0884b4819c6dc0b91a56b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64n_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,308 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_cf64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using 
Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_cf64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_cf64, 64x32x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename 
cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_cf64, 32x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + 
test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_u_tensor_op_cf64, 32x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_u_tensor_op_cf64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, 
cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_u_tensor_op_cf64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); 
+ EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64t_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64t_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..b3e2e2769e948857757dee1d523a8aa809560a30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64t_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,168 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = 
cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64t_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + 
cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..30dc4bab2b5f7ec5be4bb70af25b79c6b6c789e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64n_cf64t_tensor_op_f64_sm80.cu @@ -0,0 +1,150 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_cf64n_cf64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + 
cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_cf64n_cf64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + 1, // AlignmentB + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64t_cf64n_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64t_cf64n_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..75ade1fd5d60524ead671254ac15aeb204198870 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64t_cf64n_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,168 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using 
Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = 
cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::complex; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAddComplex, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f32n_f32n_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f32n_f32n_tensor_op_fast_f32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..e310ac86586986479d79f24ae60a1b4aa629d174 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f32n_f32n_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,132 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f32n_f32n_l_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f32n_f32n_u_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = 
cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f32t_f32n_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f32t_f32n_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..e24a150027105e7ad4963e023d57b96ed61175e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f32t_f32n_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,133 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f32t_f32n_l_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f32t_f32n_u_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using 
ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64_f64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64_f64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..7af7a25c05d82dff93a06154e2bc04f096bee32c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64_f64_tensor_op_f64_sm90.cu @@ -0,0 +1,133 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syr2k_f64n_f64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syr2k_f64t_f64n_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = 
cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64n_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64n_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..24f832dcb1eb881a69aec5f07a7c5796063918ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64n_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,483 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + 
+ test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_l_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 
1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_l_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Syr2kGrouped_f64n_f64n_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 
64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_u_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = 
cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_u_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + 
cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_u_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_u_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + 
ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64n_u_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64n_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64n_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..9cf917355aa712d043c73d9292ccae41551bee21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64n_tensor_op_f64_sm80.cu @@ -0,0 +1,253 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 
8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64n_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 
64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64n_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64t_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64t_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..e7b165fd642dc7dd96888ce41c1e1153c5a2e570 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64t_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,273 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + 
test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64t_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64t_l_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + 
ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64t_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Syr2kGrouped_f64n_f64t_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64n_f64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, 
+ cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..e3fb6eeed540ffca94176033f2c05baee3d47493 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64n_f64t_tensor_op_f64_sm80.cu @@ -0,0 +1,253 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64t_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = 
cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64t_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64t_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = 
cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64n_f64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64n_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64n_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..b53b7105088c7b06cead835d2f75127d0f70c8c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64n_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,308 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename 
cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = 
testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_l_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, 
+ cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using 
ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64n_u_tensor_op_f64, 64x32x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64n_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64n_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..f720f880ba2d88bdc1ee70b3b1571d281ff90b9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64n_tensor_op_f64_sm80.cu @@ -0,0 +1,253 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64t_f64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64t_f64n_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; 
+ using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64t_f64n_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64t_f64n_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using 
ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_f64t_f64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64t_tensor_op_f64_grouped_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64t_tensor_op_f64_grouped_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..f9292e7373ad1018d1726830c3963b7e0d35ad5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_f64t_f64t_tensor_op_f64_grouped_sm80.cu @@ -0,0 +1,308 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for grouped Rank2K interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_grouped_rank_2k.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename 
cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); 
+ EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_l_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = 
double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2kGrouped_f64t_f64t_u_tensor_op_f64, 32x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< + ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1, + ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1, + ElementC, LayoutC, cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + cutlass::arch::OpMultiplyAdd, + cutlass::BlasMode::kSymmetric>::Rank2Kkernel; + + using Rank2K = cutlass::gemm::device::Rank2KGrouped; + + test::gemm::device::TestbedGrouped testbed; + bool passed = testbed.run(24); + EXPECT_TRUE(passed); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_tf32n_f32n_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_tf32n_f32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..c6bb3b15a01375c174e8aa5c27330f817ada14b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_tf32n_f32n_tensor_op_f32_sm80.cu @@ -0,0 +1,132 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_tf32n_f32n_l_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_tf32n_f32n_u_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + 
using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_tf32t_f32n_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_tf32t_f32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..25e62fe569649b89025f21cd63a649af4ee1650b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syr2k_tf32t_f32n_tensor_op_f32_sm80.cu @@ -0,0 +1,133 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank2k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_tf32t_f32n_l_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syr2k_tf32t_f32n_u_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC 
= float; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = float; + + using Rank2K = cutlass::gemm::device::Rank2K< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRank2KUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32n_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32n_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..dcd963b8880a7a9fee820d855b87cc99c0b19732 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32n_tensor_op_f32_sm80.cu @@ -0,0 +1,137 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32n_l_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32n_u_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = 
cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32n_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32n_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..007faad7df98e973f795265d0a369b5a6650a967 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32n_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,137 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32n_l_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32n_u_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + 
using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..5d90211b8ebe8e3ec1f177248a72d20716f05e69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32t_tensor_op_f32_sm80.cu @@ -0,0 +1,137 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32t_l_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32t_u_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = 
cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32t_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32t_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..85301bc2da93c351b2df643e61db01b214e27981 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf32n_cf32t_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,137 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32t_l_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf32n_cf32t_u_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using 
ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64_cf64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..37a8a42ab5aa38ff27f7729a9cb5ed029fed00cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,135 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syrk_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syrk_cf64n_cf64t_l_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using 
ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64n_cf64n_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64n_cf64n_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..38881166b02a3b3d5cd6c110e8091cd8e74860c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64n_cf64n_tensor_op_f64_sm80.cu @@ -0,0 +1,136 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf64n_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = 
cutlass::complex; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..b826f05d2ceab442c48d1a10ac4115b71657f6c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu @@ -0,0 +1,95 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_cf64n_cf64t_l_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementA = cutlass::complex; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = cutlass::complex; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = cutlass::complex; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, // kStages + 1, // AlignmentA + false, // SplitKSerial + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kNone, + cutlass::BlasMode::kSymmetric + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..8f4e9f9e76b99e9d0ca91a18a3442163b2e0dd73 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,541 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 
64x128x32_32x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 128x64x32_64x32x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 128x128x16_64x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = 
cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_l_tensor_op_fast_f32, 64x128x16_32x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_u_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_u_tensor_op_fast_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_u_tensor_op_fast_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + 
cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_u_tensor_op_fast_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32n_f32t_u_tensor_op_fast_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + 
ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f32t_f32t_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f32t_f32t_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4dbd5b0bd63ae289546813e76a6941b0e5b0ff6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f32t_f32t_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,541 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 
32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + 
ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 64x128x32_32x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 128x64x32_64x32x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 
128x128x16_64x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_l_tensor_op_fast_f32, 64x128x16_32x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_u_tensor_op_fast_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using 
LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_u_tensor_op_fast_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_u_tensor_op_fast_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, 
+ LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_u_tensor_op_fast_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f32t_f32t_u_tensor_op_fast_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f64_f64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f64_f64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..b98a58904fcbfda7fdd52df778b081316443c0e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f64_f64_tensor_op_f64_sm90.cu @@ -0,0 +1,125 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syrk_f64n_f64t_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Syrk_f64t_f64n_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::ColumnMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + 
cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f64n_f64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f64n_f64t_tensor_op_f64_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..62d29af11925ff4e045e2542874f22a26975cf7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_f64n_f64t_tensor_op_f64_sm80.cu @@ -0,0 +1,237 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f64n_f64t_l_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f64n_f64t_l_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f64n_f64t_l_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f64n_f64t_l_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + 
cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_f64n_f64t_u_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = double; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_tf32n_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_tf32n_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba96ad508f3fdbe73970927836dc9b2b11f63006 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_tf32n_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,541 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + 
cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 128x128x16_64x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// 
+ +TEST(SM80_Device_Syrk_tf32n_f32t_l_tensor_op_f32, 64x128x16_32x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_u_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_u_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = 
cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_u_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_u_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; 
+ + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32n_f32t_u_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_tf32t_f32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_tf32t_f32t_tensor_op_f32_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..a1466d6c3e3cd9af6cc800031cc5ff24b5c74d1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/syrk_tf32t_f32t_tensor_op_f32_sm80.cu @@ -0,0 +1,541 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide SYRK interface + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_rank_k_universal.h" + +#if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = 
cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 64x128x32_32x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 128x64x32_64x32x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 128x128x16_64x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_l_tensor_op_f32, 64x128x16_32x64x16) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kLower, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 6 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_u_tensor_op_f32, 128x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Syrk_tf32t_f32t_u_tensor_op_f32, 256x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_u_tensor_op_f32, 64x256x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_u_tensor_op_f32, 256x64x32_64x64x32) { + + using ElementA = float; + using LayoutA = 
cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 64, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Syrk_tf32t_f32t_u_tensor_op_f32, 128x128x32_64x64x32) { + + using ElementA = float; + using LayoutA = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + using ElementAccumulator = float; + + using RankK = cutlass::gemm::device::RankK< + ElementA, + LayoutA, + ElementC, + LayoutC, + cutlass::FillMode::kUpper, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementC, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // if (CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed.h new file mode 
100644 index 0000000000000000000000000000000000000000..3e3178cbc309fd6188cde028099a88561eb7e0af --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed.h @@ -0,0 +1,599 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed_utils.h" +#include "testbed_universal.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct Testbed { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + typename Gemm::LayoutA::Stride stride_factor_A; + typename Gemm::LayoutB::Stride stride_factor_B; + typename Gemm::LayoutC::Stride stride_factor_C; + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + + // + // Methods + // + + Testbed( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind 
init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + stride_factor_A(typename Gemm::LayoutA::Stride()), + stride_factor_B(typename Gemm::LayoutB::Stride()), + stride_factor_C(typename Gemm::LayoutC::Stride()), + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + Testbed( + typename Gemm::LayoutA::Stride stride_factor_A_, + typename Gemm::LayoutB::Stride stride_factor_B_, + typename Gemm::LayoutC::Stride stride_factor_C_, + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + stride_factor_A(stride_factor_A_), + stride_factor_B(stride_factor_B_), + stride_factor_C(stride_factor_C_), + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + 
else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the GEMM workspace + // + + tensor_A.resize(problem_size.mk(), cutlass::layout::Affine2Layout_Factory::layout_factory(problem_size.mk(), stride_factor_A)); + tensor_B.resize(problem_size.kn(), cutlass::layout::Affine2Layout_Factory::layout_factory(problem_size.kn(), stride_factor_B)); + tensor_C.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory::layout_factory(problem_size.mn(), stride_factor_C)); + tensor_D.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory::layout_factory(problem_size.mn(), stride_factor_C)); + reference_D.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory::layout_factory(problem_size.mn(), stride_factor_C), false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1); + tensor_C.host_view().at(cutlass::make_Coord(0, 0)) = typename Gemm::ElementC(1); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + if (tensor_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + + if (reference_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view()); + + EXPECT_TRUE(passed); + + if (!passed) { + + std::stringstream fname; + + fname << "error_Gemm_device_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" + << Gemm::ThreadblockShape::kN << "x" + << Gemm::ThreadblockShape::kK << "_" + << Gemm::WarpShape::kM << "x" + << Gemm::WarpShape::kN << "x" + << Gemm::WarpShape::kK << ".txt"; + + std::ofstream file(fname.str()); + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\n\nReference =\n" << reference_D.host_view() + << "\nComputed =\n" << tensor_D.host_view(); + } + + return passed; + } + + 
/// Verifies the result is a GEMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + // + // Verify + // + + cutlass::reference::host::Gemm< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute, + ElementAccumulator, typename Gemm::Operator> + reference_gemm; + + reference_gemm( + problem_size, + alpha, + tensor_A.host_ref(), + tensor_B.host_ref(), + beta, + reference_D.host_ref(), + ElementAccumulator(0) + ); + + if (Relu) { + for (int i = 0; i < problem_size.m(); ++i) { + for (int j = 0; j < problem_size.n(); ++j) { + reference_D.at(cutlass::MatrixCoord(i, j)) = + ((ElementCompute)reference_D.at(cutlass::MatrixCoord(i, j)) < (ElementCompute)0) + ? (typename Gemm::ElementC)0 + : reference_D.at(cutlass::MatrixCoord(i, j)); + } + } + } + + return compare_reference(problem_size, alpha, beta); + } + + /// Determine if the CUDA device is sufficient to run the kernel + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + + /// Executes one test + bool run( + cutlass::gemm::GemmCoord problem_size, + int split_k_slices = 1, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) + { +/* + std::cout << "\n-----------------------\n"; + std::cout << "problem size: " << problem_size << "\n"; + 
std::cout << "split_k_slices: " << split_k_slices << "\n"; + std::cout << "alpha: " << alpha << "\n"; + std::cout << "beta: " << beta << "\n"; + std::cout << "-----------------------\n\n"; +*/ + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." << std::endl; + } + return true; + } + + this->initialize(problem_size); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + problem_size, + tensor_A.device_ref(), + tensor_B.device_ref(), + tensor_C.device_ref(), + tensor_D.device_ref(), + {alpha, beta}, + split_k_slices + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + if (status != cutlass::Status::kSuccess) { + cudaError_t error = cudaGetLastError(); + std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; + return true; + } + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + if (!passed) { + std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllGemmBasic( + const typename Gemm::LayoutA::Stride& stride_factor_A = typename Gemm::LayoutA::Stride(), + const typename Gemm::LayoutB::Stride& stride_factor_B = typename Gemm::LayoutB::Stride(), + const typename Gemm::LayoutC::Stride& stride_factor_C = typename Gemm::LayoutC::Stride()) { + bool passed = true; + + int const kMinimumOperandElementSize = + std::min( + int(cutlass::sizeof_bits::value), + 
int(cutlass::sizeof_bits::value)); + + int const kAlignment = cutlass::platform::is_same< + typename Gemm::OperatorClass, + cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; + + // int8_t gemm alignment constraints + int const kAlignmentM = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value ? 4 : kAlignment; + + int const kAlignmentN = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value ? 4 : kAlignment; + + int const kAlignmentK = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + (cutlass::platform::is_same::value || + cutlass::platform::is_same::value) ? 4 : kAlignment; + + int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM}; + + int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN}; + + int problem_size_k[] = { + kAlignmentK, Gemm::ThreadblockShape::kK * (Gemm::kStages + 1) - kAlignmentK}; + + int split_k_slices[] = { + 1, 2, 3 + }; + + double problem_alpha[] = { + 1 + }; + + double problem_beta[] = { + 2.0 + }; + + Testbed testbed(stride_factor_A, stride_factor_B, stride_factor_C); + + using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (int split_k : split_k_slices) { + + if (!Gemm::kSplitKSerial && split_k > 1) { + continue; + } + + if (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k) { + continue; + } + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + cutlass::gemm::GemmCoord problem_size(m, n, k); + passed = testbed.run( + problem_size, + split_k, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + } + + return passed; +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllGemm( + const typename Gemm::LayoutA::Stride& stride_factor_A, + const typename Gemm::LayoutB::Stride& stride_factor_B = typename Gemm::LayoutB::Stride(), + const typename Gemm::LayoutC::Stride& stride_factor_C = typename Gemm::LayoutC::Stride()) +{ + // Test basic GEMM with non-default stride factors + return TestAllGemmBasic(stride_factor_A, stride_factor_B, stride_factor_C); +} + +template +bool TestAllGemm() +{ +#ifdef NDEBUG + // Non-debug builds also test basic GEMM with default stride factors + if (!TestAllGemmBasic()) { + return false; + } +#endif // NDEBUG + + // Test universal GEMM +#if 0 + // Define the universal kernel + using UniversalKernel = cutlass::gemm::kernel::GemmUniversal< + typename Gemm::GemmKernel::Mma, // Mma + typename Gemm::GemmKernel::Epilogue, // Epilogue + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<> // ThreadblockSwizzle + >; +#else + // Define the streamk universal kernel + using UniversalKernel = cutlass::gemm::kernel::GemmUniversalStreamk< + typename Gemm::GemmKernel::Mma, // Mma + typename Gemm::GemmKernel::Epilogue, // Epilogue + cutlass::gemm::threadblock::ThreadblockSwizzleStreamK // ThreadblockSwizzle + >; +#endif + + // Define the universal adaptor + using UniversalGemm = cutlass::gemm::device::GemmUniversalAdapter; + + // Test universal GEMM + return TestAllGemmUniversal(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +template +bool TestGemmPerf(int iterations = 1) { + bool passed = true; + + int problem_size_m[] = { 2048 }; + + int problem_size_n[] = { 4352 }; + + int problem_size_k[] = { 4096 }; + + int split_k_slices[] = { 1 }; + double problem_alpha[] = { 1 }; + double problem_beta[] = { 0.0 }; + + Testbed testbed; + + using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute; + + for (int m : problem_size_m) { 
+ for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (int split_k : split_k_slices) { + + if (!Gemm::kSplitKSerial && split_k > 1) { + continue; + } + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + cutlass::gemm::GemmCoord problem_size(m, n, k); + + for (int i = 0; i < iterations; i++){ + passed = testbed.run( + problem_size, + split_k, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + } + + if (!passed) { + return false; + } + } + } + } + } + } + } + + return passed; +} + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..244bc0682e6a1d85bb8a0bfd7478179f8ec7a207 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_complex.h @@ -0,0 +1,294 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm_complex.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedComplex : public Testbed { + + using Base = Testbed; + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; + + + // + // Methods + // + + TestbedComplex( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + Base(init_A_, init_B_, init_C_, seed_) { } + + + /// Verifies the result is a GEMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + // + // Verify + // + + cutlass::reference::host::GemmComplex( + problem_size, + alpha, + this->tensor_A.host_ref(), + Gemm::kTransformA, + this->tensor_B.host_ref(), + Gemm::kTransformB, + beta, + this->tensor_C.host_ref(), + this->reference_D.host_ref(), + ElementAccumulator(0) + ); + + return 
this->compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmCoord problem_size, + int split_k_slices = 1, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + + // + // Initialize workspace + // + + this->initialize(problem_size); + + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + problem_size, + this->tensor_A.device_ref(), + this->tensor_B.device_ref(), + this->tensor_C.device_ref(), + this->tensor_D.device_ref(), + {alpha, beta}, + split_k_slices + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + if (!passed) { + std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllGemmComplex() { + bool passed = true; + + using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute; + + int const kMinimumOperandElementSize = + std::min( + int(cutlass::sizeof_bits::value), + int(cutlass::sizeof_bits::value)); + + int const kAlignment = + cutlass::platform::is_same< + typename Gemm::OperatorClass, + cutlass::arch::OpClassSimt>::value ? 
1 : 128 / kMinimumOperandElementSize; + + int problem_size_m[] = { + kAlignment, 512 - 3*kAlignment + }; + + int problem_size_n[] = { + kAlignment, 512 - 2*kAlignment + }; + + int problem_size_k[] = { + kAlignment, 128 - kAlignment + }; + + int split_k_slices[] = { + 1, 2, 3 + }; + + double problem_alpha[] = { + 1 + }; + + double problem_beta[] = { + 2.0 + }; + + TestbedComplex testbed; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (int split_k : split_k_slices) { + + if (!Gemm::kSplitKSerial && split_k > 1) { + continue; + } + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + cutlass::gemm::GemmCoord problem_size(m, n, k); + + passed = testbed.run( + problem_size, + split_k, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + } + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_gemm_with_broadcast.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_gemm_with_broadcast.h new file mode 100644 index 0000000000000000000000000000000000000000..9336874baeffd1787d7f205c55bae34d1862ad94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_gemm_with_broadcast.h @@ -0,0 +1,657 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/gemm_complex.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct GemmWithBroadcastReferenceOp { + + using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; + + using ElementCompute = typename OutputOp::ElementCompute; + using ElementZ = typename OutputOp::ElementZ; + using ElementT = typename OutputOp::ElementT; + + typename OutputOp::BinaryOp binary_op; + typename OutputOp::ElementwiseOp elementwise_op; + + GemmWithBroadcastReferenceOp() { } + + void operator()(ElementZ &Z, ElementT &T, ElementCompute gemm, ElementCompute bias) { + + ElementCompute t_full = binary_op(gemm, bias); + T = ElementT(t_full); + + ElementCompute z_full = elementwise_op(t_full); + Z = ElementZ(z_full); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Fused testbed +// +// Y = GEMM(AB, C) +// +// T[i, j] = BinaryOp(Y[i, j], Broadcast[i]) +// +// Z[i, j] = Elementwise(T[i, j]) +// + +template < + typename Gemm, + typename ReferenceOp = GemmWithBroadcastReferenceOp +> +struct TestbedGemmWithBroadcast { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; + using ElementC = typename 
Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename OutputOp::ElementCompute; + using ElementVector = typename OutputOp::ElementVector; + using ElementZ = typename OutputOp::ElementZ; + using ElementT = typename OutputOp::ElementT; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; // Input A + cutlass::HostTensor tensor_B; // Input B + cutlass::HostTensor tensor_C; // Input C + cutlass::HostTensor tensor_Broadcast; // Input Broadcast + + cutlass::HostTensor tensor_Z; + cutlass::HostTensor tensor_T; + + cutlass::HostTensor tensor_C_ref; + cutlass::HostTensor tensor_Y_ref; + cutlass::HostTensor tensor_Z_ref; + cutlass::HostTensor tensor_T_ref; + + + // + // Methods + // + + TestbedGemmWithBroadcast( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + 
cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the GEMM workspace + // + + tensor_A.resize(problem_size.mk()); + tensor_B.resize(problem_size.kn()); + tensor_C.resize(problem_size.mn()); + tensor_Z.resize(problem_size.mn()); + tensor_T.resize(problem_size.mn()); + tensor_Broadcast.resize({ + problem_size.m(), + 1 + }); + + tensor_C_ref.resize(problem_size.mn()); + tensor_Y_ref.resize(problem_size.mn()); + tensor_Z_ref.resize(problem_size.mn()); + tensor_T_ref.resize(problem_size.mn()); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + EXPECT_TRUE(initialize_tensor(tensor_Broadcast.host_view(), init_C, seed + 2020)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1); + tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1); + + for (int m = 0; m < tensor_C_ref.extent().row(); ++m) { + for (int n = 0; n < tensor_C_ref.extent().column(); ++n) { + tensor_C_ref.at({m, n}) = ElementAccumulator(tensor_C.at({m, n})); + } + } + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_Broadcast.sync_device(); + + tensor_Z.sync_device(); + tensor_T.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementAccumulator alpha, + ElementAccumulator beta) { + + tensor_Z.sync_host(); + tensor_T.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Z.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_T.host_view()), 0); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Z_ref.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_T_ref.host_view()), 0); + + bool passed = true; + float norm_diff = 0; + + if (OutputOp::kStoreZ) { + norm_diff = cutlass::reference::host::TensorNormDiff(tensor_Z_ref.host_view(), tensor_Z.host_view(), float()); + passed = (norm_diff <= 0.1f); + EXPECT_LT(norm_diff, 0.1f) << " tensor_Z is incorrect"; + } + + if (OutputOp::kStoreT) { + + norm_diff = cutlass::reference::host::TensorNormDiff(tensor_T_ref.host_view(), tensor_T.host_view(), float()); + passed = (passed && (norm_diff <= 0.1f)); + + EXPECT_LT(norm_diff, 0.1f) << " tensor_T is incorrect"; + } + + + if (!passed) { + + /* + std::stringstream fname; + + fname << "error_Gemm_device_" 
+ << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" + << Gemm::ThreadblockShape::kN << "x" + << Gemm::ThreadblockShape::kK << "_" + << Gemm::WarpShape::kM << "x" + << Gemm::WarpShape::kN << "x" + << Gemm::WarpShape::kK << ".txt"; + + std::ofstream file(fname.str()); + */ + + std::ofstream file("errors_testbed_gemm_with_broadcast.txt"); + + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\nZ =\n" << tensor_Z.host_view() + << "\nT =\n" << tensor_T.host_view() + << "\n\n" + << "\nY_ref =\n" << tensor_Y_ref.host_view() + << "\nZ_ref =\n" << tensor_Z_ref.host_view() + << "\nT_ref =\n" << tensor_T_ref.host_view(); + } + + return passed; + } + + /// Verifies the result is a GEMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementAccumulator alpha, + ElementAccumulator beta) { + + // + // Verify + // + + cutlass::reference::host::GemmComplex< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + ElementAccumulator, typename Gemm::LayoutC, + ElementAccumulator, ElementAccumulator + >( + problem_size, + alpha, + tensor_A.host_ref(), + Gemm::kTransformA, + tensor_B.host_ref(), + Gemm::kTransformB, + beta, + tensor_C_ref.host_ref(), + tensor_Y_ref.host_ref(), + ElementAccumulator(0) + ); + + using ElementC = typename Gemm::ElementC; + + ReferenceOp reference_op; + + // compute tensor Z and tensor T + for (int m = 0; m < problem_size.m(); ++m) { + for (int n = 0; n < problem_size.n(); ++n) { + + ElementZ z; + ElementT t; + + reference_op(z, t, tensor_Y_ref.at({m, n}), tensor_Broadcast.at({m, 0})); + + tensor_Z_ref.at({m, n}) = z; + tensor_T_ref.at({m, n}) = t; + } + } + + return compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA 
device is sufficient to execute the kernel. + bool sufficient() const { + + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmUniversalMode mode, + cutlass::gemm::GemmCoord problem_size, + int batch_count = 1, + ElementAccumulator alpha = ElementAccumulator(1), + ElementAccumulator beta = ElementAccumulator(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + + this->initialize(problem_size); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data(), + tensor_Z.device_data(), + tensor_Broadcast.device_data(), + tensor_T.device_data(), + problem_size.m() * problem_size.k(), + problem_size.n() * problem_size.k(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + problem_size.m(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + tensor_B.layout().stride(0), + tensor_C.layout().stride(0), + tensor_Z.layout().stride(0), + 0, // This must be zero + tensor_T.layout().stride(0), + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = true; + + passed = this->verify(problem_size, alpha, beta); + + if (!passed) { + std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl; + } + + // + // Profile + // + + #if 0 // profiling disabled for now. 
+ + int const kWorkspaces = 100; + + cutlass::DeviceAllocation profiling_tensor_A(tensor_A.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_B(tensor_B.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_C(tensor_C.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_Broadcast(tensor_Broadcast.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_Z(tensor_Z.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_T(tensor_T.capacity() * kWorkspaces); + + cudaEvent_t events[2]; + for (auto & event : events) { + cudaError_t result = cudaEventCreate(&event); + if (result != cudaSuccess) { + EXPECT_EQ(result, cudaSuccess) << " cudaEventCreate() failed with error " << cudaGetErrorString(result); + return false; + break; + } + } + + int const kWarmupIterations = 5; + int const kProfilingIterations = 100; + + for (int i = 0; i < kWarmupIterations; ++i) { + status = gemm_op(); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + } + + + cudaError_t result = cudaEventRecord(events[0]); + EXPECT_EQ(result, cudaSuccess); + + for (int i = 0; i < kProfilingIterations; ++i) { + + typename Gemm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + profiling_tensor_A.get() + tensor_A.capacity() * (i % kWorkspaces), + profiling_tensor_B.get() + tensor_B.capacity() * (i % kWorkspaces), + profiling_tensor_C.get() + tensor_C.capacity() * (i % kWorkspaces), + profiling_tensor_Z.get() + tensor_Z.capacity() * (i % kWorkspaces), + profiling_tensor_Broadcast.get() + tensor_Broadcast.capacity() * (i % kWorkspaces), + profiling_tensor_T.get() + tensor_T.capacity() * (i % kWorkspaces), + problem_size.m() * problem_size.k(), + problem_size.n() * problem_size.k(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + problem_size.m(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + 
tensor_B.layout().stride(0), + tensor_C.layout().stride(0), + tensor_Z.layout().stride(0), + 0, // This must be zero + tensor_T.layout().stride(0), + }; + + gemm_op.initialize(arguments, workspace.get()); + status = gemm_op(); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + } + + result = cudaEventRecord(events[1]); + EXPECT_EQ(result, cudaSuccess); + + result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess); + + float elapsed_time = 0; + result = cudaEventElapsedTime(&elapsed_time, events[0], events[1]); + EXPECT_EQ(result, cudaSuccess); + + double average_time = double(elapsed_time) / double(kProfilingIterations); + + std::cout << problem_size << ": " << average_time << " ms" << std::endl; + + for (auto & event : events) { + cudaEventDestroy(event); + } + #endif + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Gemm, + typename ReferenceOp = GemmWithBroadcastReferenceOp +> +bool TestGemmWithBroadcast( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmUniversalMode mode, + int batch_count, + double alpha = 1.0, + double beta = 2.0) { + + bool passed = true; + + TestbedGemmWithBroadcast testbed; + + using ElementAccumulator = typename Gemm::ElementAccumulator; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Gemm, + typename ReferenceOp = GemmWithBroadcastReferenceOp +> +bool TestAllGemmWithBroadcast() { + + int M_problems[] = {8, 136, 264, 520}; + int N_problems[] = {8, 136, 264, 520}; + int K_problems[] = {8, 136, 264, 520}; + double alpha_problems[] = {1.25, 2.25}; + double beta_problems[] = {0, 1, 2.0}; + + bool passed = true; + + for (int M : M_problems) { + for (int N : 
N_problems) { + for (int K : K_problems) { + for (double alpha : alpha_problems) { + for (double beta : beta_problems) { + + TestbedGemmWithBroadcast testbed; + + using ElementAccumulator = typename Gemm::ElementAccumulator; + + passed = testbed.run( + cutlass::gemm::GemmUniversalMode::kGemm, + {M, N, K}, + 1, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + EXPECT_TRUE(passed) + << "M: " << M << ", N: " << N << ", K: " << K << ", alpha: " << alpha << ", beta: " << beta; + + if (!passed) { + + return passed; + } + } + } + } + } + } + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_gemm_with_reduction.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_gemm_with_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..5c3e7353e72b4bebd6a30153628a37b14d4a4a06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_gemm_with_reduction.h @@ -0,0 +1,588 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/gemm_complex.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct GemmWithReductionReference { + + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::ElementCompute; + using ElementC = typename Gemm::ElementC; + using ElementT = typename Gemm::GemmKernel::Epilogue::ElementTensor; + // + // Data members + // + + BinaryOp binary_op; + + // + // Methods + // + + GemmWithReductionReference() { } + + ElementCompute operator()( + ElementAccumulator d_y, + ElementT t) { + + return binary_op(ElementCompute(d_y), ElementCompute(t)); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Gemm, + typename ReferenceOp +> +struct TestbedGemmWithReduction { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementT = typename Gemm::GemmKernel::Epilogue::ElementTensor; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; 
+ cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor tensor_Reduction; + cutlass::HostTensor tensor_Tensor; + cutlass::HostTensor tensor_C_ref; + cutlass::HostTensor reference_d_Y; + cutlass::HostTensor reference_D; + cutlass::HostTensor reference_Reduction; + + // + // Methods + // + + TestbedGemmWithReduction( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + for (int m = 0; m < view.extent().row(); ++m) { + for (int n = 0; n < view.extent().column(); ++n) { + //view.at({m, n}) = Element(float(((idx ++) % 17) - 8)); + view.at({m, n}) = (n == 0 ? 
Element(m) : Element()); + + } + } + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the GEMM workspace + // + + tensor_A.resize(problem_size.mk()); + tensor_B.resize(problem_size.kn()); + tensor_C.resize(problem_size.mn()); + tensor_D.resize(problem_size.mn()); + + tensor_Reduction.resize({ + problem_size.m(), + (problem_size.n() - 1 + Gemm::ThreadblockShape::kN) / Gemm::ThreadblockShape::kN + }); + + tensor_Tensor.resize(problem_size.mn()); + reference_D.resize(problem_size.mn(), false); + reference_d_Y.resize(problem_size.mn(), false); + tensor_C_ref.resize(problem_size.mn(), false); + reference_Reduction.resize({problem_size.m(), 1}, false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + EXPECT_TRUE(initialize_tensor(tensor_Tensor.host_view(), init_C, seed + 2020)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1); + tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1); + + for (int m = 0; m < tensor_C_ref.extent().row(); ++m) { + for (int n = 0; n < tensor_C_ref.extent().column(); ++n) { + tensor_C_ref.at({m, n}) = ElementAccumulator(tensor_C.at({m, n})); + } + } + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + tensor_Reduction.sync_device(); + tensor_Tensor.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementAccumulator alpha, + ElementAccumulator beta) { + + tensor_Reduction.sync_host(); + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Reduction.host_view()), 0); + + bool passed = true; + for (int m = 0; m < tensor_Reduction.extent().row(); ++m) { + + ElementAccumulator reduced_value = ElementAccumulator(); + for (int j = 0; j < tensor_Reduction.extent().column(); ++j) { + reduced_value += tensor_Reduction.at({m, j}); + } + + if (reduced_value != reference_Reduction.at({m, 0})) { + std::cout << "Error in bias[" << m << "] - Expected: " << reference_Reduction.at({m, 0}) << ", got: " << reduced_value << std::endl; + passed = false; + break; + } + } + EXPECT_TRUE(passed) << "Reduction is incorect."; + + if (!cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view())) { + EXPECT_TRUE(false) << " mismatched 
reference"; + passed = false; + } + + if (!passed) { + + /* + std::stringstream fname; + + fname << "error_Gemm_device_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" + << Gemm::ThreadblockShape::kN << "x" + << Gemm::ThreadblockShape::kK << "_" + << Gemm::WarpShape::kM << "x" + << Gemm::WarpShape::kN << "x" + << Gemm::WarpShape::kK << ".txt"; + + std::ofstream file(fname.str()); + */ + + std::ofstream file("testbed_universal_errors_sm70.txt"); + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\nT = \n" << tensor_Tensor.host_view() + << "\n\nReference =\n" << reference_D.host_view() + << "\nComputed =\n" << tensor_D.host_view() + << "\n\nReduction =\n" << tensor_Reduction.host_view() << "\n" + << "\nReference reduction =\n" << reference_Reduction.host_view() << "\n"; + } + + return passed; + } + + /// Verifies the result is a GEMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementAccumulator alpha, + ElementAccumulator beta) { + + // + // Verify + // + + cutlass::reference::host::GemmComplex< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + ElementAccumulator, typename Gemm::LayoutC, + ElementAccumulator, ElementAccumulator + >( + problem_size, + alpha, + tensor_A.host_ref(), + Gemm::kTransformA, + tensor_B.host_ref(), + Gemm::kTransformB, + beta, + tensor_C_ref.host_ref(), + reference_d_Y.host_ref(), + ElementAccumulator(0) + ); + + using ElementC = typename Gemm::ElementC; + + ReferenceOp reference_op; + + // compute backwards + for (int m = 0; m < problem_size.m(); ++m) { + ElementAccumulator reduced_value = ElementAccumulator(); + for (int n = 0; n < problem_size.n(); ++n) { + ElementAccumulator d_full = reference_op(reference_d_Y.at({m, 
n}), tensor_Tensor.at({m, n})); + reduced_value += d_full; + reference_D.at({m, n}) = ElementC(d_full); + } + reference_Reduction.at({m, 0}) = reduced_value; + } + + return compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmUniversalMode mode, + cutlass::gemm::GemmCoord problem_size, + int batch_count = 1, + ElementAccumulator alpha = ElementAccumulator(1), + ElementAccumulator beta = ElementAccumulator(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + + this->initialize(problem_size); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data(), + tensor_D.device_data(), + tensor_Reduction.device_data(), + tensor_Tensor.device_data(), + problem_size.m() * problem_size.k(), + problem_size.n() * problem_size.k(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + problem_size.m(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + tensor_B.layout().stride(0), + tensor_C.layout().stride(0), + tensor_D.layout().stride(0), + tensor_Reduction.layout().stride(0), + tensor_Tensor.layout().stride(0), + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + if (!passed) { + std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl; + } + + // + // Profile + // + + #if 0 // profiling disabled for now. 
+ + int const kWorkspaces = 100; + + cutlass::DeviceAllocation profiling_tensor_A(tensor_A.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_B(tensor_B.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_C(tensor_C.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_D(tensor_D.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_Reduction(tensor_Reduction.capacity() * kWorkspaces); + cutlass::DeviceAllocation profiling_tensor_Tensor(tensor_Tensor.capacity() * kWorkspaces); + + cudaEvent_t events[2]; + for (auto & event : events) { + cudaError_t result = cudaEventCreate(&event); + if (result != cudaSuccess) { + EXPECT_EQ(result, cudaSuccess) << " cudaEventCreate() failed with error " << cudaGetErrorString(result); + return false; + break; + } + } + + int const kWarmupIterations = 5; + int const kProfilingIterations = 100; + + for (int i = 0; i < kWarmupIterations; ++i) { + status = gemm_op(); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + } + + + cudaError_t result = cudaEventRecord(events[0]); + EXPECT_EQ(result, cudaSuccess); + + for (int i = 0; i < kProfilingIterations; ++i) { + + typename Gemm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + profiling_tensor_A.get() + tensor_A.capacity() * (i % kWorkspaces), + profiling_tensor_B.get() + tensor_B.capacity() * (i % kWorkspaces), + profiling_tensor_C.get() + tensor_C.capacity() * (i % kWorkspaces), + profiling_tensor_D.get() + tensor_D.capacity() * (i % kWorkspaces), + profiling_tensor_Reduction.get() + tensor_Reduction.capacity() * (i % kWorkspaces), + profiling_tensor_Tensor.get() + tensor_Tensor.capacity() * (i % kWorkspaces), + problem_size.m() * problem_size.k(), + problem_size.n() * problem_size.k(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + problem_size.m(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + 
tensor_B.layout().stride(0), + tensor_C.layout().stride(0), + tensor_D.layout().stride(0), + tensor_Reduction.layout().stride(0), + tensor_Tensor.layout().stride(0), + }; + + gemm_op.initialize(arguments, workspace.get()); + status = gemm_op(); + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + } + + result = cudaEventRecord(events[1]); + EXPECT_EQ(result, cudaSuccess); + + result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess); + + float elapsed_time = 0; + result = cudaEventElapsedTime(&elapsed_time, events[0], events[1]); + EXPECT_EQ(result, cudaSuccess); + + double average_time = double(elapsed_time) / double(kProfilingIterations); + + std::cout << problem_size << ": " << average_time << " ms" << std::endl; + + for (auto & event : events) { + cudaEventDestroy(event); + } + #endif + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +template +bool TestGemmWithReduction( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmUniversalMode mode, + int batch_count = 1, + double alpha = 1.0, + double beta = 2.0) { + + bool passed = true; + + TestbedGemmWithReduction testbed; + + using ElementAccumulator = typename Gemm::ElementAccumulator; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped.h new file mode 100644 index 
0000000000000000000000000000000000000000..c5ee3ceb8a428aacf713a0802411a50111611d97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped.h @@ -0,0 +1,501 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface + +*/ + +#pragma once + +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm_grouped.h" +#include "cutlass/gemm/kernel/default_gemm_grouped.h" +#include "cutlass/gemm/device/gemm_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/tensor_view_io.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedGrouped { + + // + // Type definitions + // + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + + using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; + using ElementCompute = typename EpilogueOutputOp::ElementCompute; + + using LayoutA = typename Gemm::LayoutA; + using LayoutB = typename Gemm::LayoutB; + using LayoutC = typename Gemm::LayoutC; + + using MatrixCoord = typename LayoutC::TensorCoord; + + // + // Data members + // + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint32_t seed; + + int problem_count; + + std::vector problem_sizes_host; + cutlass::DeviceAllocation problem_sizes_device; + + std::vector offset_A; 
+ std::vector offset_B; + std::vector offset_C; + std::vector offset_D; + + std::vector lda_host; + std::vector ldb_host; + std::vector ldc_host; + std::vector ldd_host; + + cutlass::DeviceAllocation lda; + cutlass::DeviceAllocation ldb; + cutlass::DeviceAllocation ldc; + cutlass::DeviceAllocation ldd; + + cutlass::DeviceAllocation block_A; + cutlass::DeviceAllocation block_B; + cutlass::DeviceAllocation block_C; + cutlass::DeviceAllocation block_D; + + cutlass::DeviceAllocation ptr_A; + cutlass::DeviceAllocation ptr_B; + cutlass::DeviceAllocation ptr_C; + cutlass::DeviceAllocation ptr_D; + + // + // Methods + // + + TestbedGrouped( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint32_t seed_ = 3080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint32_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + if (cutlass::sizeof_bits::value <= 16) { + scope_max = 5; + scope_min = -5; + } + else { + scope_max = 8; + scope_min = -8; + } + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 
0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + // no fill - remain zero + } + + return true; + } + + /// Initializes data structures + void initialize() { + + // + // Choose random problem sizes + // + + // construct a few problems of random sizes + srand(seed); + + int64_t total_elements_A = 0; + int64_t total_elements_B = 0; + int64_t total_elements_C = 0; + int64_t total_elements_D = 0; + + + lda_host.resize(problem_count); + ldb_host.resize(problem_count); + ldc_host.resize(problem_count); + ldd_host.resize(problem_count); + + problem_sizes_host.clear(); + problem_sizes_host.resize(problem_count); + + for (int32_t i = 0; i < problem_count; ++i) { + + cutlass::gemm::GemmCoord problem( + 8 * (rand() % 64) + 24, + 8 * (rand() % 64) + 24, + 8 * (rand() % 64) + 24); + + if (!i) { + problem = cutlass::gemm::GemmCoord(48, 16, 8); + } + + problem_sizes_host.at(i) = problem; + + // std::cout << "Problem[" << i << "]: " << problem << std::endl; + + lda_host.at(i) = LayoutA::packed({problem.m(), problem.k()}).stride(0); + ldb_host.at(i) = LayoutB::packed({problem.k(), problem.n()}).stride(0); + ldc_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0); + ldd_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0); + + offset_A.push_back(total_elements_A); + offset_B.push_back(total_elements_B); + offset_C.push_back(total_elements_C); + offset_D.push_back(total_elements_D); + + int64_t elements_A = problem.m() * problem.k(); + int64_t elements_B = problem.k() * problem.n(); + int64_t elements_C = problem.m() * problem.n(); + int64_t elements_D = problem.m() * problem.n(); + + total_elements_A += elements_A; + total_elements_B += elements_B; + total_elements_C += elements_C; + total_elements_D += elements_D; + + // Random strides between problems? 
+ } + + problem_sizes_device.reset(problem_count); + problem_sizes_device.copy_from_host(problem_sizes_host.data()); + + lda.reset(problem_count); + ldb.reset(problem_count); + ldc.reset(problem_count); + ldd.reset(problem_count); + + lda.copy_from_host(lda_host.data()); + ldb.copy_from_host(ldb_host.data()); + ldc.copy_from_host(ldc_host.data()); + ldd.copy_from_host(ldd_host.data()); + + // + // Assign pointers + // + + block_A.reset(total_elements_A); + block_B.reset(total_elements_B); + block_C.reset(total_elements_C); + block_D.reset(total_elements_D); + + std::vector ptr_A_host(problem_count); + std::vector ptr_B_host(problem_count); + std::vector ptr_C_host(problem_count); + std::vector ptr_D_host(problem_count); + + for (int32_t i = 0; i < problem_count; ++i) { + ptr_A_host.at(i) = block_A.get() + offset_A.at(i); + ptr_B_host.at(i) = block_B.get() + offset_B.at(i); + ptr_C_host.at(i) = block_C.get() + offset_C.at(i); + ptr_D_host.at(i) = block_D.get() + offset_D.at(i); + } + + ptr_A.reset(problem_count); + ptr_A.copy_from_host(ptr_A_host.data()); + + ptr_B.reset(problem_count); + ptr_B.copy_from_host(ptr_B_host.data()); + + ptr_C.reset(problem_count); + ptr_C.copy_from_host(ptr_C_host.data()); + + ptr_D.reset(problem_count); + ptr_D.copy_from_host(ptr_D_host.data()); + + // + // Initialize the problems of the workspace + // + + for (int32_t i = 0; i < problem_count; ++i) { + cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); + + LayoutA layout_A(lda_host.at(i)); + LayoutB layout_B(ldb_host.at(i)); + LayoutC layout_C(ldc_host.at(i)); + LayoutC layout_D(ldd_host.at(i)); + + MatrixCoord extent_A{problem.m(), problem.k()}; + MatrixCoord extent_B{problem.k(), problem.n()}; + MatrixCoord extent_C{problem.m(), problem.n()}; + + std::vector matrix_A(layout_A.capacity(extent_A)); + std::vector matrix_B(layout_B.capacity(extent_B)); + std::vector matrix_C(layout_C.capacity(extent_C)); + std::vector matrix_D(layout_D.capacity(extent_C)); + + 
initialize_tensor(cutlass::TensorView(matrix_A.data(), layout_A, extent_A), init_A, seed * 2021); + initialize_tensor(cutlass::TensorView(matrix_B.data(), layout_B, extent_B), init_B, seed * 2022); + initialize_tensor(cutlass::TensorView(matrix_C.data(), layout_C, extent_C), init_C, seed * 2023); + + cutlass::device_memory::copy_to_device(ptr_A_host.at(i), matrix_A.data(), matrix_A.size()); + cutlass::device_memory::copy_to_device(ptr_B_host.at(i), matrix_B.data(), matrix_B.size()); + cutlass::device_memory::copy_to_device(ptr_C_host.at(i), matrix_C.data(), matrix_C.size()); + cutlass::device_memory::copy_to_device(ptr_D_host.at(i), matrix_D.data(), matrix_D.size()); + } + } + + /// Verifies the result is a GEMM + bool verify( + ElementCompute alpha, + ElementCompute beta) { + + bool passed = true; + + for (int32_t i = 0; i < problem_count; ++i) { + cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); + + LayoutA layout_A(lda_host.at(i)); + LayoutB layout_B(ldb_host.at(i)); + LayoutC layout_C(ldc_host.at(i)); + LayoutC layout_D(ldd_host.at(i)); + + MatrixCoord extent_A{problem.m(), problem.k()}; + MatrixCoord extent_B{problem.k(), problem.n()}; + MatrixCoord extent_C{problem.m(), problem.n()}; + + std::vector matrix_A(layout_A.capacity(extent_A)); + std::vector matrix_B(layout_B.capacity(extent_B)); + std::vector matrix_C(layout_C.capacity(extent_C)); + std::vector matrix_D(layout_D.capacity(extent_C)); + std::vector matrix_Ref(layout_D.capacity(extent_C)); + + cutlass::device_memory::copy_to_host(matrix_A.data(), block_A.get() + offset_A.at(i), matrix_A.size()); + cutlass::device_memory::copy_to_host(matrix_B.data(), block_B.get() + offset_B.at(i), matrix_B.size()); + cutlass::device_memory::copy_to_host(matrix_C.data(), block_C.get() + offset_C.at(i), matrix_C.size()); + cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size()); + + cutlass::TensorView view_A(matrix_A.data(), layout_A, extent_A); + 
cutlass::TensorView view_B(matrix_B.data(), layout_B, extent_B); + cutlass::TensorView view_C(matrix_C.data(), layout_C, extent_C); + cutlass::TensorView view_D(matrix_D.data(), layout_D, extent_C); + cutlass::TensorView view_Ref(matrix_Ref.data(), layout_D, extent_C); + + // Reference GEMM + cutlass::reference::host::GemmComplex< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, ElementAccumulator + >( + problem, + alpha, + view_A, + Gemm::kTransformA, + view_B, + Gemm::kTransformB, + beta, + view_C, + view_Ref, + ElementAccumulator(0) + ); + + // Ensure that no input or output is entirely zero + EXPECT_GT(cutlass::reference::host::TensorNorm(view_A), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_B), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_C), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_D), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_Ref), 0); + + // Compare against reference + passed = cutlass::reference::host::TensorEquals(view_D, view_Ref); + + if (!passed) { + std::ofstream file("testbed_grouped_errors.txt"); + + file + << "problem: " << problem << " [group: " << i << "]\n" + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << view_A + << "\nB =\n" << view_B + << "\nC =\n" << view_C + << "\n\nReference =\n" << view_Ref + << "\nComputed =\n" << view_D; + + return passed; + } + } + + return passed; + } + + /// Executes one test + bool run( + int problem_count, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + this->problem_count = problem_count; + + // Initialize the problem + initialize(); + + int threadblock_count = Gemm::sufficient(problem_sizes_host.data(), problem_count); + + // Early exit + if (!threadblock_count) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device resources." 
<< std::endl; + } + return true; + } + + // Configure the GEMM arguments + typename EpilogueOutputOp::Params epilogue_op(alpha, beta); + + // Configure GEMM arguments + typename Gemm::Arguments args( + problem_sizes_device.get(), + problem_count, + threadblock_count, + epilogue_op, + ptr_A.get(), + ptr_B.get(), + ptr_C.get(), + ptr_D.get(), + lda.get(), + ldb.get(), + ldc.get(), + ldd.get(), + problem_sizes_host.data() + ); + + // Initialize the GEMM object + Gemm gemm; + + size_t workspace_size = gemm.get_workspace_size(args); + cutlass::DeviceAllocation workspace(workspace_size); + + cutlass::Status status = gemm.initialize(args, workspace.get()); + + if (status != cutlass::Status::kSuccess) { + return false; + } + + // Run the GEMM object + status = gemm.run(); + + if (status != cutlass::Status::kSuccess) { + return false; + } + + // Wait for completion + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) + << "Kernel execution error: " << cudaGetErrorString(result); + + if (result != cudaSuccess) { + return false; + } + + // Verify correctness + return verify(alpha, beta); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // device +} // gemm +} // test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped_rank_2k.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped_rank_2k.h new file mode 100644 index 0000000000000000000000000000000000000000..7b212aee04520645c7789cf672838c3ef09892b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped_rank_2k.h @@ -0,0 +1,502 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped Rank2K interface + +*/ + +#pragma once + +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped.h" +#include "cutlass/gemm/kernel/default_rank_2k_grouped.h" +#include "cutlass/gemm/device/rank_2k_grouped.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/rank_2k_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/tensor_view_io.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedGrouped { + + // + // Type definitions + // + + using ElementA = typename Rank2K::ElementA; + using ElementB = typename Rank2K::ElementB; + using ElementC = typename Rank2K::ElementC; + using ElementAccumulator = typename Rank2K::ElementAccumulator; + + using EpilogueOutputOp = typename Rank2K::EpilogueOutputOp; + using ElementCompute = typename EpilogueOutputOp::ElementCompute; + + using LayoutA = typename Rank2K::LayoutA; + using LayoutB = typename Rank2K::LayoutB; + using LayoutC = typename Rank2K::LayoutC; + + using MatrixCoord = typename LayoutC::TensorCoord; + + // + // Data members + // + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint32_t seed; + + int problem_count; + + std::vector problem_sizes_host; + cutlass::DeviceAllocation problem_sizes_device; + + std::vector offset_A; + std::vector offset_B; + std::vector offset_C; + std::vector 
offset_D; + + std::vector lda_host; + std::vector ldb_host; + std::vector ldc_host; + std::vector ldd_host; + + cutlass::DeviceAllocation lda; + cutlass::DeviceAllocation ldb; + cutlass::DeviceAllocation ldc; + cutlass::DeviceAllocation ldd; + + cutlass::DeviceAllocation block_A; + cutlass::DeviceAllocation block_B; + cutlass::DeviceAllocation block_C; + cutlass::DeviceAllocation block_D; + + cutlass::DeviceAllocation ptr_A; + cutlass::DeviceAllocation ptr_B; + cutlass::DeviceAllocation ptr_C; + cutlass::DeviceAllocation ptr_D; + + // + // Methods + // + + TestbedGrouped( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint32_t seed_ = 3080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint32_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + if (cutlass::sizeof_bits::value <= 16) { + scope_max = 5; + scope_min = -5; + } + else { + scope_max = 8; + scope_min = -8; + } + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == 
cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + // no fill - remain zero + } + + return true; + } + + /// Initializes data structures + void initialize() { + + // + // Choose random problem sizes + // + + // construct a few problems of random sizes + srand(seed); + + int64_t total_elements_A = 0; + int64_t total_elements_B = 0; + int64_t total_elements_C = 0; + int64_t total_elements_D = 0; + + + lda_host.resize(problem_count); + ldb_host.resize(problem_count); + ldc_host.resize(problem_count); + ldd_host.resize(problem_count); + + problem_sizes_host.clear(); + problem_sizes_host.resize(problem_count); + + for (int32_t i = 0; i < problem_count; ++i) { + + auto N = 8 * (rand() % 64) + 24; + auto K = 8 * (rand() % 64) + 24; + cutlass::gemm::GemmCoord problem(N, N, K); + + if (!i) { + problem = cutlass::gemm::GemmCoord(16, 16, 8); + } + + problem_sizes_host.at(i) = problem; + + lda_host.at(i) = LayoutA::packed({problem.n(), problem.k()}).stride(0); + ldb_host.at(i) = LayoutB::packed({problem.n(), problem.k()}).stride(0); + ldc_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); + ldd_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); + + offset_A.push_back(total_elements_A); + offset_B.push_back(total_elements_B); + offset_C.push_back(total_elements_C); + offset_D.push_back(total_elements_D); + + int64_t elements_A = problem.n() * problem.k(); + int64_t elements_B = problem.n() * problem.k(); + int64_t elements_C = problem.n() * problem.n(); + int64_t elements_D = problem.n() * problem.n(); + + total_elements_A += elements_A; + total_elements_B += elements_B; + total_elements_C += elements_C; + total_elements_D += elements_D; + + // Random strides between problems? 
+ } + + problem_sizes_device.reset(problem_count); + problem_sizes_device.copy_from_host(problem_sizes_host.data()); + + lda.reset(problem_count); + ldb.reset(problem_count); + ldc.reset(problem_count); + ldd.reset(problem_count); + + lda.copy_from_host(lda_host.data()); + ldb.copy_from_host(ldb_host.data()); + ldc.copy_from_host(ldc_host.data()); + ldd.copy_from_host(ldd_host.data()); + + // + // Assign pointers + // + + block_A.reset(total_elements_A); + block_B.reset(total_elements_B); + block_C.reset(total_elements_C); + block_D.reset(total_elements_D); + + std::vector ptr_A_host(problem_count); + std::vector ptr_B_host(problem_count); + std::vector ptr_C_host(problem_count); + std::vector ptr_D_host(problem_count); + + for (int32_t i = 0; i < problem_count; ++i) { + ptr_A_host.at(i) = block_A.get() + offset_A.at(i); + ptr_B_host.at(i) = block_B.get() + offset_B.at(i); + ptr_C_host.at(i) = block_C.get() + offset_C.at(i); + ptr_D_host.at(i) = block_D.get() + offset_D.at(i); + } + + ptr_A.reset(problem_count); + ptr_A.copy_from_host(ptr_A_host.data()); + + ptr_B.reset(problem_count); + ptr_B.copy_from_host(ptr_B_host.data()); + + ptr_C.reset(problem_count); + ptr_C.copy_from_host(ptr_C_host.data()); + + ptr_D.reset(problem_count); + ptr_D.copy_from_host(ptr_D_host.data()); + + // + // Initialize the problems of the workspace + // + + for (int32_t i = 0; i < problem_count; ++i) { + cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); + + LayoutA layout_A(lda_host.at(i)); + LayoutB layout_B(ldb_host.at(i)); + LayoutC layout_C(ldc_host.at(i)); + LayoutC layout_D(ldd_host.at(i)); + + MatrixCoord extent_A{problem.n(), problem.k()}; + MatrixCoord extent_B{problem.n(), problem.k()}; + MatrixCoord extent_C{problem.n(), problem.n()}; + + std::vector matrix_A(layout_A.capacity(extent_A)); + std::vector matrix_B(layout_B.capacity(extent_B)); + std::vector matrix_C(layout_C.capacity(extent_C)); + std::vector matrix_D(layout_D.capacity(extent_C)); + + 
initialize_tensor(cutlass::TensorView(matrix_A.data(), layout_A, extent_A), init_A, seed * 2021); + initialize_tensor(cutlass::TensorView(matrix_B.data(), layout_B, extent_B), init_B, seed * 2022); + initialize_tensor(cutlass::TensorView(matrix_C.data(), layout_C, extent_C), init_C, seed * 2023); + + cutlass::device_memory::copy_to_device(ptr_A_host.at(i), matrix_A.data(), matrix_A.size()); + cutlass::device_memory::copy_to_device(ptr_B_host.at(i), matrix_B.data(), matrix_B.size()); + cutlass::device_memory::copy_to_device(ptr_C_host.at(i), matrix_C.data(), matrix_C.size()); + cutlass::device_memory::copy_to_device(ptr_D_host.at(i), matrix_D.data(), matrix_D.size()); + } + } + + /// Verifies the result is a Rank2K + bool verify( + ElementCompute alpha, + ElementCompute beta) { + + bool passed = true; + + for (int32_t i = 0; i < problem_count; ++i) { + cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); + + LayoutA layout_A(lda_host.at(i)); + LayoutB layout_B(ldb_host.at(i)); + LayoutC layout_C(ldc_host.at(i)); + LayoutC layout_D(ldd_host.at(i)); + + MatrixCoord extent_A{problem.n(), problem.k()}; + MatrixCoord extent_B{problem.n(), problem.k()}; + MatrixCoord extent_C{problem.n(), problem.n()}; + + std::vector matrix_A(layout_A.capacity(extent_A)); + std::vector matrix_B(layout_B.capacity(extent_B)); + std::vector matrix_C(layout_C.capacity(extent_C)); + std::vector matrix_D(layout_D.capacity(extent_C)); + std::vector matrix_Ref(layout_D.capacity(extent_C)); + + cutlass::device_memory::copy_to_host(matrix_A.data(), block_A.get() + offset_A.at(i), matrix_A.size()); + cutlass::device_memory::copy_to_host(matrix_B.data(), block_B.get() + offset_B.at(i), matrix_B.size()); + cutlass::device_memory::copy_to_host(matrix_C.data(), block_C.get() + offset_C.at(i), matrix_C.size()); + cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size()); + + cutlass::TensorView view_A(matrix_A.data(), layout_A, extent_A); + 
cutlass::TensorView view_B(matrix_B.data(), layout_B, extent_B); + cutlass::TensorView view_C(matrix_C.data(), layout_C, extent_C); + cutlass::TensorView view_D(matrix_D.data(), layout_D, extent_C); + cutlass::TensorView view_Ref(matrix_Ref.data(), layout_D, extent_C); + + // Reference Rank2K + cutlass::reference::host::Rank2KComplex< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, ElementAccumulator + >( + problem, + alpha, + view_A, + Rank2K::kTransformA, + view_B, + Rank2K::kTransformB, + beta, + view_C, + view_Ref, + ElementAccumulator(0), + Rank2K::kFillModeC, + Rank2K::kBlasMode + ); + + // Ensure that no input or output is entirely zero + EXPECT_GT(cutlass::reference::host::TensorNorm(view_A), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_B), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_C), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_D), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(view_Ref), 0); + + // Compare against reference + passed = cutlass::reference::host::TensorEquals(view_D, view_Ref); + + if (!passed) { + std::ofstream file("testbed_grouped_errors.txt"); + + file + << "problem: " << problem << " [group: " << i << "]\n" + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << view_A + << "\nB =\n" << view_B + << "\nC =\n" << view_C + << "\n\nReference =\n" << view_Ref + << "\nComputed =\n" << view_D; + + return passed; + } + } + + return passed; + } + + /// Executes one test + bool run( + int problem_count, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + this->problem_count = problem_count; + + // Initialize the problem + initialize(); + + int threadblock_count = Rank2K::sufficient(problem_sizes_host.data(), problem_count); + + // Early exit + if (!threadblock_count) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device resources." 
<< std::endl; + } + return true; + } + + // Configure the Rank2K arguments + typename EpilogueOutputOp::Params epilogue_op(alpha, beta); + + // Configure Rank2K arguments + typename Rank2K::Arguments args( + cutlass::gemm::GemmUniversalMode::kGemm, + problem_sizes_device.get(), + problem_count, + threadblock_count, + epilogue_op, + ptr_A.get(), + ptr_B.get(), + ptr_C.get(), + ptr_D.get(), + lda.get(), + ldb.get(), + ldc.get(), + ldd.get(), + problem_sizes_host.data() + ); + + // Initialize the Rank2K object + Rank2K rank2k; + + size_t workspace_size = rank2k.get_workspace_size(args); + cutlass::DeviceAllocation workspace(workspace_size); + + cutlass::Status status = rank2k.initialize(args, workspace.get()); + + if (status != cutlass::Status::kSuccess) { + return false; + } + + // Run the Rank2K object + status = rank2k.run(); + + if (status != cutlass::Status::kSuccess) { + return false; + } + + // Wait for completion + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) + << "Kernel execution error: " << cudaGetErrorString(result); + + if (result != cudaSuccess) { + return false; + } + + // Verify correctness + return verify(alpha, beta); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // device +} // gemm +} // test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped_rank_2k_scheduler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped_rank_2k_scheduler.h new file mode 100644 index 0000000000000000000000000000000000000000..af588d37f5096e8707092ed058ba268517be10f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_grouped_rank_2k_scheduler.h @@ -0,0 +1,461 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for grouped Rank2K problem visitors +*/ + +#pragma once + +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h" +#include "cutlass/util/device_memory.h" +#include "cutlass/device_kernel.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Use simple problem visitor as a baseline +template +struct BaselineProblemVisitor : public cutlass::gemm::kernel::BaseGroupedProblemVisitor { + using Base = cutlass::gemm::kernel::BaseGroupedProblemVisitor; + using Params = typename Base::Params; + static int const kThreadCount = ThreadCount; + static cutlass::FillMode const kFillModeC = FillModeC; + + struct SharedStorage {}; + + int32_t tile_count_sum; + SharedStorage &shared_storage; + + // + // Methods + // + CUTLASS_DEVICE + BaselineProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, block_idx), + shared_storage(shared_storage_) + { + cutlass::gemm::GemmCoord problem = this->problem_size(); + cutlass::gemm::GemmCoord grid = this->grid_shape(problem); + tile_count_sum = this->tile_count(grid); + } + + CUTLASS_DEVICE + bool next_tile() { + if (this->tile_idx < tile_count_sum) { + return true; + } + + do { + ++this->problem_idx; + + if (this->problem_idx >= this->params.problem_count) { + return false; + } + + cutlass::gemm::GemmCoord problem = this->problem_size(); + cutlass::gemm::GemmCoord grid = this->grid_shape(problem); + + this->problem_tile_start = tile_count_sum; + tile_count_sum += this->tile_count(grid); + + } while (tile_count_sum <= this->tile_idx); + + return true; + } + + static size_t get_workspace_size(const cutlass::gemm::GemmCoord* 
host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count) { + return 0; + } + + static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count, + void* host_workspace_ptr) {} + + CUTLASS_DEVICE + cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const { + int32_t macro_id = threadblock_id / ProblemSizeHelper::OffsetHelper::kThreadblockSkewRatio; + int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1; + int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2); + + if (FillModeC == cutlass::FillMode::kUpper) { + cutlass::swap(macro_row, macro_col); + } + + int32_t row = ProblemSizeHelper::OffsetHelper::macro_row_to_row(macro_row, threadblock_id); + int32_t col = ProblemSizeHelper::OffsetHelper::macro_col_to_col(macro_col, threadblock_id); + + return cutlass::gemm::GemmCoord(row, col, 0); + } +}; + +template +struct ProblemVisitorKernel { + struct SharedStorage { + typename ProblemVisitor::SharedStorage problem_visitor; + }; + + struct Params { + typename ProblemVisitor::Params problem_visitor_params; + int32_t* visited_problems_ptr; + int32_t* visited_tiles_ptr; + int32_t visits_per_block; + + Params(): + visited_problems_ptr(nullptr), + visited_tiles_ptr(nullptr), + visits_per_block(0) {} + + Params(typename ProblemVisitor::Params problem_visitor_params_, + int32_t* visited_problems_ptr_, + int32_t* visited_tiles_ptr_, + int32_t visits_per_block_): + problem_visitor_params(problem_visitor_params_), + visited_problems_ptr(visited_problems_ptr_), + visited_tiles_ptr(visited_tiles_ptr_), + visits_per_block(visits_per_block_) {} + }; + + CUTLASS_DEVICE + void operator()(const Params& params, SharedStorage &shared_storage) { + int32_t store_offset = params.visits_per_block * blockIdx.x; + ProblemVisitor problem_visitor(params.problem_visitor_params, + shared_storage.problem_visitor, + blockIdx.x); + + while (problem_visitor.next_tile()) { + 
cutlass::gemm::GemmCoord problem_size = problem_visitor.problem_size(); + int32_t problem_idx = problem_visitor.problem_index(); + int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); + + cutlass::gemm::GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); + cutlass::gemm::GemmCoord tile_offset = problem_visitor.threadblock_offset(threadblock_idx); + + problem_visitor.advance(gridDim.x); + + // + // Early exit conditions + // 1) Out of range + // 2) Upper-triangular block in lower-triangular problem + // 3) Lower-triangular block in upper-triangular problem + // + + if (grid_shape.m() <= tile_offset.m() || + grid_shape.n() <= tile_offset.n()) { + continue; + } + + if (ProblemVisitor::kFillModeC == cutlass::FillMode::kLower && + (tile_offset.m() + 1) * ProblemVisitor::ThreadblockShape::kM <= tile_offset.n() * ProblemVisitor::ThreadblockShape::kN) { + continue; + } + + if (ProblemVisitor::kFillModeC == cutlass::FillMode::kUpper && + tile_offset.m() * ProblemVisitor::ThreadblockShape::kM >= (tile_offset.n() + 1) * ProblemVisitor::ThreadblockShape::kN) { + continue; + } + + if (threadIdx.x == 0) { + params.visited_problems_ptr[store_offset] = problem_idx; + params.visited_tiles_ptr[store_offset] = threadblock_idx; + ++store_offset; + } + } + } +}; + +template +struct ProblemVisitorRunner { + using BaseKernel = ProblemVisitorKernel; + using Params = typename BaseKernel::Params; + + Params params; + std::vector host_problem_sizes; + int32_t problem_count; + int32_t threadblock_count; + int32_t visits_per_block; + cutlass::DeviceAllocation visited_problems; + cutlass::DeviceAllocation visited_tiles; + cutlass::DeviceAllocation device_problem_sizes; + cutlass::DeviceAllocation workspace; + std::vector host_visited_problems; + std::vector host_visited_tiles; + + ProblemVisitorRunner(const std::vector& host_problem_sizes_, + int32_t threadblock_count_): + host_problem_sizes(host_problem_sizes_), + 
problem_count(int32_t(host_problem_sizes_.size())), + threadblock_count(threadblock_count_) {} + + /// Initializes GEMM state from arguments. + cutlass::Status initialize() { + size_t workspace_bytes = ProblemVisitor::get_workspace_size( + host_problem_sizes.data(), + problem_count, + threadblock_count); + + workspace.reset(workspace_bytes); + std::vector host_workspace(workspace_bytes); + + int32_t tile_count = ProblemVisitor::group_tile_count(host_problem_sizes.data(), problem_count); + + ProblemVisitor::host_precompute(host_problem_sizes.data(), problem_count, + threadblock_count, host_workspace.data()); + + workspace.copy_from_host(host_workspace.data(), workspace_bytes); + + device_problem_sizes.reset(problem_count); + device_problem_sizes.copy_from_host(host_problem_sizes.data(), problem_count); + + visits_per_block = (tile_count - 1 + threadblock_count) / threadblock_count; + int32_t total_visits = visits_per_block * threadblock_count; + + visited_problems.reset(total_visits); + visited_tiles.reset(total_visits); + host_visited_problems.resize(total_visits); + host_visited_tiles.resize(total_visits); + + cudaError_t result = cudaMemset(visited_problems.get(), -1, sizeof(int32_t) * total_visits); + if (result != cudaSuccess) { + return cutlass::Status::kErrorInternal; + } + + result = cudaMemset(visited_tiles.get(), -1, sizeof(int32_t) * total_visits); + if (result != cudaSuccess) { + return cutlass::Status::kErrorInternal; + } + + typename ProblemVisitor::Params pv_params(device_problem_sizes.get(), problem_count, workspace.get(), tile_count); + params = Params(pv_params, visited_problems.get(), visited_tiles.get(), visits_per_block); + + return cutlass::Status::kSuccess; + } + + bool verify() { + // Sort by problem size and then by threadblock_idx + std::vector indices(host_visited_problems.size()); + std::iota(indices.begin(), indices.end(), 0); + + std::stable_sort(indices.begin(), indices.end(), + [&](int32_t i1, int32_t i2) { + if 
(host_visited_problems[i1] == host_visited_problems[i2]) { + return host_visited_tiles[i1] < host_visited_tiles[i2]; + } + return host_visited_problems[i1] < host_visited_problems[i2]; + }); + + int32_t idx = 0; + + // Skip any entries that were not visited + while (host_visited_problems[indices[idx]] == -1) { + ++idx; + } + + // Check that each problem visited has the tiles we expect + for (int32_t problem_idx = 0; problem_idx < problem_count; ++problem_idx) { + auto problem = host_problem_sizes[problem_idx]; + ProblemVisitor::possibly_transpose_problem(problem); + int32_t problem_tiles = ProblemVisitor::tile_count(ProblemVisitor::grid_shape(problem)); + for (int i = 0; i < problem_tiles; ++i) { + EXPECT_EQ(problem_idx, host_visited_problems[indices[idx]]); + EXPECT_EQ(i, host_visited_tiles[indices[idx]]); + ++idx; + } + } + + return true; + } + + bool run(bool skip_tile_check=false, cudaStream_t stream = nullptr) { + cutlass::Status status = initialize(); + if (status != cutlass::Status::kSuccess) { + std::cerr << "Initialization failed" << std::endl; + return false; + } + + dim3 grid(threadblock_count, 1, 1); + dim3 block(ProblemVisitor::kThreadCount, 1, 1); + int smem_size = int(sizeof(typename BaseKernel::SharedStorage)); + + cutlass::Kernel<<>>(params); + + cudaError_t result = cudaGetLastError(); + if (result != cudaSuccess) { + std::cerr << "grid launch failed with error " << cudaGetErrorString(result) << std::endl; + return false; + } + + result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + std::cerr << "cudaDeviceSynchronize failed with error " << cudaGetErrorString(result) << std::endl; + return false; + } + + visited_problems.copy_to_host(host_visited_problems.data()); + visited_tiles.copy_to_host(host_visited_tiles.data()); + + if (skip_tile_check) { + return true; + } + + return verify(); + } +}; + +template +struct TestbedGroupedRank2KScheduler { + + using BaselinePV = BaselineProblemVisitor, + ThreadblockShape, + PrefetchTileCount, + 
ThreadCount, + FillModeC>; + + // + // Data members + // + + // Whether to skip checking that the tiles are visited as expected. This is useful + // in cases where ThreadblockShape::kM != ThreadblockShape::kN, for which the grouped + // Rank2K scheduler may assign out-of-bounds tiles that will cause a threadblock to + // exit early, but which are difficult to detect in tests without reimplementing + // this functionality. + bool skip_tile_check; + uint32_t seed; + int problem_count; + int threadblock_count; + std::vector problem_sizes_host; + + // + // Methods + // + + TestbedGroupedRank2KScheduler(bool skip_tile_check_=false, uint32_t seed_ = 3080): + skip_tile_check(skip_tile_check_), seed(seed_) { srand(seed); } + + /// Initializes data structures + void initialize(int32_t scale_factor) { + + // + // Choose random problem sizes + // + + problem_sizes_host.clear(); + problem_sizes_host.resize(problem_count); + + for (int32_t i = 0; i < problem_count; ++i) { + int n = scale_factor * (rand() % 64) + 24; + + cutlass::gemm::GemmCoord problem( + n, + n, + scale_factor * (rand() % 64) + 24); + + problem_sizes_host.at(i) = problem; + } + } + + template + void compare_visitors(const ProblemVisitorRunner& baseline_runner) { + using PV = cutlass::gemm::kernel::Rank2KGroupedProblemVisitor< + ThreadblockShape, + GroupScheduleMode_, + PrefetchTileCount, + ThreadCount, + FillModeC>; + ProblemVisitorRunner runner(problem_sizes_host, threadblock_count); + EXPECT_TRUE(runner.run(skip_tile_check)); + + // Check that this problem visitor visits the same problems and tiles as the baseline + EXPECT_EQ(baseline_runner.host_visited_problems, runner.host_visited_problems); + EXPECT_EQ(baseline_runner.host_visited_tiles, runner.host_visited_tiles); + } + + template + void compare_visitors(const ProblemVisitorRunner& baseline_runner) { + // Compare the next visitor with the baseline visitor + compare_visitors(baseline_runner); + + // Recurse to compare the next visitors + 
compare_visitors(baseline_runner); + } + + /// Executes the test on all scheduler modes + void run(int problem_count, int threadblock_count, int scale_factor=8) { + + this->problem_count = problem_count; + this->threadblock_count = threadblock_count; + + // Initialize the problem + initialize(scale_factor); + + // Run the baseline visitor to which we will compare all other visitors + ProblemVisitorRunner baseline_runner(problem_sizes_host, threadblock_count); + EXPECT_TRUE(baseline_runner.run(skip_tile_check)); + + compare_visitors(baseline_runner); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // device +} // gemm +} // test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_rank_k_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_rank_k_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..7c403ad8b560dde5042213c04c1110e0e5e787eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_rank_k_universal.h @@ -0,0 +1,511 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide Rank 2k update interface + +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/error_metrics.h" +#include "cutlass/util/reference/host/rank_k_complex.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedRank2KUniversal { + + using ElementA = typename RankK::ElementA; + using ElementC = typename RankK::ElementC; + using ElementAccumulator = typename RankK::ElementAccumulator; + using ElementCompute = typename RankK::RankKkernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + + // + // Methods + // + + TestbedRank2KUniversal( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int mantissa_in_bits) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output 
= cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + + EXPECT_TRUE(false) << "Input distribution not implemented"; + return false; + } + + return true; + } + + + /// Helper to initialize a tensor view + template + bool initialize_symmetric_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int mantissa_in_bits) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillSymmetricRandomUniform( + view, seed, RankK::kFillModeC, scope_max, scope_min, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillSymmetricRandomGaussian( + view, seed, RankK::kFillModeC, 0, 0.5, mantissa_in_bits); + } + else { + + EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; + return 
false; + } + + return true; + } + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the RankK workspace + // + + tensor_A.resize(problem_size.mk()); + tensor_C.resize(problem_size.mn()); + tensor_D.resize(problem_size.mn()); + reference_D.resize(problem_size.mn(), false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits::bits)); + EXPECT_TRUE(initialize_symmetric_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits::bits)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. + tensor_A.host_view().at({0, 0}) = typename RankK::ElementA(1); + tensor_C.host_view().at({0, 0}) = typename RankK::ElementC(1); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + if (tensor_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + + if (reference_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); + + bool passed = l2_norm < cutlass::MantissaInBits::error; + + return passed; + } + + /// Verifies the result is a RankK + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + // + // Verify + // + 
cutlass::reference::host::Rank2KComplex< + typename RankK::ElementA, typename RankK::LayoutA, + typename RankK::ElementC, typename RankK::LayoutC, + ElementCompute, ElementAccumulator + >( + problem_size, + alpha, + tensor_A.host_ref(), + RankK::kTransformA, + beta, + tensor_C.host_ref(), + reference_D.host_ref(), + ElementAccumulator(0), + RankK::kFillModeC, + RankK::kBlasMode + ); + + return compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename RankK::RankKkernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmUniversalMode mode, + cutlass::gemm::GemmCoord problem_size, + int batch_count = 1, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + +#if 0 + std::cout << "[TestbedRankKUniversal::run()] problem(m, n, k): " << problem_size + << " alpha: " << ElementCompute(alpha) + << " beta: " << ElementCompute(beta) << std::endl; +#endif + + this->initialize(problem_size); + + // + // Initialize the RankK operator + // + + typename RankK::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + tensor_A.device_data(), + tensor_C.device_data(), + tensor_D.device_data(), + problem_size.n() * problem_size.k(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + tensor_C.layout().stride(0), + tensor_D.layout().stride(0) + }; + + RankK rank2k_op; + + size_t workspace_size = RankK::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = rank2k_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the RankK + // + + status = rank2k_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + //if (true) { + if (!passed) { + std::stringstream fname; + + fname << "error_RankK_device_" + << "fill_mode_c_" + << (RankK::kFillModeC == cutlass::FillMode::kLower ? "lower_" : + (RankK::kFillModeC == cutlass::FillMode::kUpper ? 
"upper_" : "invalid_")) + << "mnk_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << RankK::ThreadblockShape::kM << "x" + << RankK::ThreadblockShape::kN << "x" + << RankK::ThreadblockShape::kK << "_" + << RankK::WarpShape::kM << "x" + << RankK::WarpShape::kN << "x" + << RankK::WarpShape::kK << ".txt"; + + std::cout << fname.str() << std::endl; + + std::ofstream results(fname.str()); + + results << problem_size << std::endl; + + results + << "\nA:\n" << tensor_A.host_view() << "\n" + << "\nC:\n" << tensor_C.host_view() << "\n" + << "\nD reference:\n" << reference_D.host_view() << "\n" + << "\nD computed:\n" << tensor_D.host_view() << "\n"; + + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +template +bool TestRank2kUniversal( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmUniversalMode mode, + int batch_count, + double alpha = 1.0, + double beta = 2.0) { + + bool passed = true; + + TestbedRank2KUniversal testbed; + + using ElementCompute = typename RankK::EpilogueOutputOp::ElementCompute; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + return passed; +} + +template +bool TestAllRankKUniversal() { + bool passed = true; + + + int const kMinimumOperandElementSize = int(cutlass::sizeof_bits::value); + int const kAlignmentN = 128 / kMinimumOperandElementSize; + int const kAlignmentK = 128 / kMinimumOperandElementSize; + + cutlass::gemm::GemmUniversalMode modes[] = { + cutlass::gemm::GemmUniversalMode::kGemm, + }; + + int problem_size_n[] = { + kAlignmentN, 512 - 2*kAlignmentN + }; + + int problem_size_k[] = { + kAlignmentK, + RankK::ThreadblockShape::kK * RankK::kStages - kAlignmentK, + RankK::ThreadblockShape::kK * RankK::kStages * 3 - kAlignmentK + }; + + int batch_counts[] = { // may be interpretted as batch count or split-K slices + 1 // 
Just running one batch for now (removing 2, 3, 5, 7) + }; + + double problem_alpha[] = { + 1.0 + }; + + double problem_beta[] = { + 2.0 + }; + + + using ElementCompute = typename RankK::EpilogueOutputOp::ElementCompute; + + for (cutlass::gemm::GemmUniversalMode mode : modes) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (int batch_count : batch_counts) { + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + if (mode == cutlass::gemm::GemmUniversalMode::kGemm || + mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { + } + + cutlass::gemm::GemmCoord problem_size(n, n, k); + + TestbedRank2KUniversal testbed; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + } + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_sanity.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_sanity.h new file mode 100644 index 0000000000000000000000000000000000000000..73c0c5cd65a0002e3921cce7009c567533ee3d40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_sanity.h @@ -0,0 +1,238 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/core_io.h" + +#include "testbed.h" + + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// List of Gemm internal paramters this testbed supports user verification +// +enum class ParameterID { + + // Threadblock-level parameters + kSmemASize, + kSmemBSize, + + // Warp-level parameters + kWarpFragmentASize, + kWarpFragmentBSize, + kWarpFragmentCSize, + kInvalid +}; + +struct Reference { + ParameterID parameter_id; + + union { + int value; + + struct { + int m, n, k; + } gemm_shape; + + struct { + int row, column; + } matrix_shape; + }; + + std::string error_msg; + + Reference( + ParameterID parameter_id_, + int value_=-1, + std::string const &error_msg_="") : parameter_id(parameter_id_), value(value_), error_msg(error_msg_) {} +}; + + +template +struct TestbedSanity { + + // + // Type definitions (All Gemm types top down) + // + + // Unpacking Gemm types in the following order + // Kernel-level > Threadblock-level > Warp-level > Instruction-level + + // kernel-level cutlass Gemm + using GemmKernel = typename Gemm::GemmKernel; + + // + // Threadblock-level gemm types + // + using MmaThreadBlock = typename GemmKernel::Mma; + + // Threadblock-level gemm shape covering one stage + using ThreadblockShape = typename MmaThreadBlock::Shape; + + // Shared memory size covering all stages + using SmemShapeA = typename 
MmaThreadBlock::Base::SharedStorage::ShapeA; + using SmemPaddingA = typename MmaThreadBlock::Policy::SmemPaddingA; + using SmemShapeB = typename MmaThreadBlock::Base::SharedStorage::ShapeB; + using SmemPaddingB = typename MmaThreadBlock::Policy::SmemPaddingB; + + + /// Number of stages + static int const kStages = MmaThreadBlock::Base::kStages; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = MmaThreadBlock::kWarpGemmIterations; + + + // + // Warp-level gemm types + // + + // Warp-level gemm operator + using MmaWarp = typename MmaThreadBlock::Operator; + + // Warp-level gemm shape covering all kgroups + using WarpShape = typename MmaWarp::Shape; + + // Warp-level framents holding operands A & B operand and destination C + using WarpFragmentA = typename MmaWarp::FragmentA; + using WarpFragmentB = typename MmaWarp::FragmentB; + using WarpFragmentC = typename MmaWarp::FragmentC; + + // + // Instruction-level gemm types + // + + // Instruction-level gemm operator + using MmaInstruction = typename MmaWarp::Policy::Operator; + + // Instruction shape + using InstructionShape = typename MmaInstruction::Shape; + + // Instruction-level framents holding operands A & B operand and destination C + using InstructionFragmentA = typename MmaInstruction::FragmentA; + using InstructionFragmentB = typename MmaInstruction::FragmentB; + using InstructionFragmentC = typename MmaInstruction::FragmentC; + + // + // Testbed types + // + + // Vector of values holding user provided reference + using ReferenceVector = std::vector; + + // + // Data members + // + ReferenceVector references; + + // + // Methods + // + + TestbedSanity(ReferenceVector const &references_ = ReferenceVector()) : references(references_){ } + + // verify all parameter in ReferenceVector + bool verify() { + for(auto ref : references) + verify_parameter(ref); + return true; + } + + // verify parameter of type Reference + void verify_parameter(Reference const& ref) { + 
switch(ref.parameter_id) { + case ParameterID::kWarpFragmentASize : EXPECT_TRUE(WarpFragmentA::kElements == ref.value) << *this; break; + case ParameterID::kWarpFragmentBSize : EXPECT_TRUE(WarpFragmentB::kElements == ref.value) << *this; break; + case ParameterID::kWarpFragmentCSize : EXPECT_TRUE(WarpFragmentC::kElements == ref.value) << *this; break; + } + } + +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Overload output operators for TesbedSanity +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +template +std::ostream & operator<<(std::ostream &out, TestbedSanity const &test) { + + + out << "Gemm internal parameters" << std::endl + << " Threadblock-level parameters:" << std::endl + << " ThreadblockShape = " << typename TestbedSanity::ThreadblockShape() << std::endl + << " kStages = " << TestbedSanity::kStages << std::endl + << " kWarpGemmIterations = "<< TestbedSanity::kWarpGemmIterations << std::endl + <<" Shared memory sizes:" << std::endl + <<" SmemPaddingA = " << typename TestbedSanity::SmemPaddingA() << std::endl + <<" SmemPaddingB = " << typename TestbedSanity::SmemPaddingB() << std::endl + <<" SmemShapeA = " << typename TestbedSanity::SmemShapeA() << std::endl + <<" SmemShapeB = " << typename TestbedSanity::SmemShapeB() << std::endl + <<" Warp-level parameters" << std::endl + <<" WarpShape = " << typename TestbedSanity::WarpShape() << std::endl + <<" Fragment sizes:" << std::endl + <<" WarpFragmentA::kElements = " << TestbedSanity::WarpFragmentA::kElements << std::endl + <<" WarpFragmentB::kElements = " << TestbedSanity::WarpFragmentB::kElements << std::endl + <<" WarpFragmentC::kElements = " << TestbedSanity::WarpFragmentC::kElements << std::endl + <<" Instruction-level parameters" << std::endl + <<" InstructionShape = " << typename TestbedSanity::InstructionShape() << std::endl + <<" Fragment sizes:" << 
std::endl + <<" InstructionFragmentA::kElements = " << TestbedSanity::InstructionFragmentA::kElements << std::endl + <<" InstructionFragmentB::kElements = " << TestbedSanity::InstructionFragmentB::kElements << std::endl + <<" InstructionFragmentC::kElements = " << TestbedSanity::InstructionFragmentC::kElements << std::endl; + + return out; +} + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_sparse.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_sparse.h new file mode 100644 index 0000000000000000000000000000000000000000..1e521ea7bec3f439462040297e0c7beb1c7867c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_sparse.h @@ -0,0 +1,499 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface + + Testbed for sparse operations not to be released for CUDA 11.0 GA. Expected release is 11.1. 
+*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/host_reorder.h" +#include "cutlass/util/host_uncompress.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct SparseTestbed { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; + + static int const kSparse = Gemm::GemmKernel::kSparse; + static int const kMetaSizeInBits = Gemm::GemmKernel::kMetaSizeInBits; + static int const kMaxID2 = Gemm::GemmKernel::kMaxID2; + static int const kElementsPerElementE = Gemm::GemmKernel::kElementsPerElementE; + + using ElementE = typename Gemm::GemmKernel::ElementE; + using LayoutE = cutlass::layout::RowMajor; + using ReorderedLayoutE = typename Gemm::GemmKernel::LayoutE; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + cutlass::Distribution::Kind init_E; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_A_uncompressed; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + cutlass::HostTensor tensor_E; + cutlass::HostTensor tensor_E_reordered; + + // 
+ // Methods + // + + SparseTestbed( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_E_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080) + : init_A(init_A_), + init_B(init_B_), + init_C(init_C_), + init_E(init_E_), + seed(seed_) {} + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size, bool tensor_C_row_broadcast = false) { + // + // Allocate the GEMM workspace + // + tensor_A.resize(cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); + tensor_A_uncompressed.resize(problem_size.mk()); + tensor_B.resize(problem_size.kn()); + if 
(tensor_C_row_broadcast) { + tensor_C.resize({problem_size.m(), 1}); + } else { + tensor_C.resize(problem_size.mn()); + } + + tensor_D.resize(problem_size.mn()); + reference_D.resize(problem_size.mn(), false); + tensor_E.resize(cutlass::make_Coord( + problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); + tensor_E_reordered.resize(cutlass::make_Coord( + problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + + if (init_E == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomSparseMeta( + tensor_E.host_view(), seed, kMetaSizeInBits); + } else if (init_E == cutlass::Distribution::Identity) { + uint32_t content = (kMaxID2 == 1) ? 0x44444444 : 0x4444; + cutlass::reference::host::TensorFill(tensor_E.host_view(), + (ElementE)(content)); + } else { + EXPECT_TRUE(false); + } + + cutlass::reorder_meta(tensor_E_reordered.host_ref(), tensor_E.host_ref(), + {problem_size.m(), problem_size.n(), + problem_size.k() / kSparse / kElementsPerElementE}); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1); + tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1); + + if (tensor_C_row_broadcast) { + for (int i = 0; i < problem_size.m(); ++i) + for (int j = 0; j < problem_size.n(); ++j) + reference_D.host_view().at({i, j}) = tensor_C.host_view().at({i, 0}); + } else { + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); + } + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + tensor_E_reordered.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + if (tensor_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + + if (reference_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view()); + + EXPECT_TRUE(passed); + + if (!passed) { + + std::stringstream fname; + + fname << "error_Gemm_device_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" + << Gemm::ThreadblockShape::kN << "x" + << Gemm::ThreadblockShape::kK << "_" + << Gemm::WarpShape::kM << "x" + << Gemm::WarpShape::kN << "x" + << Gemm::WarpShape::kK << ".txt"; + + std::ofstream file(fname.str()); + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << 
tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\nE =\n" << tensor_E.host_view() + << "\n\nReference =\n" << reference_D.host_view() + << "\nComputed =\n" << tensor_D.host_view(); + } + + return passed; + } + + /// Verifies the result is a GEMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + // + // Verify + // + + cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(), + tensor_E.host_ref(), problem_size.m(), problem_size.k()); + + cutlass::reference::host::Gemm< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + typename Gemm::ElementC, typename Gemm::LayoutC, + ElementCompute, + ElementAccumulator, typename Gemm::Operator> + reference_gemm; + + reference_gemm( + problem_size, + alpha, + tensor_A_uncompressed.host_ref(), + tensor_B.host_ref(), + beta, + reference_D.host_ref(), + ElementAccumulator(0) + ); + + return compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmCoord problem_size, + int split_k_slices = 1, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0), + bool tensor_C_row_broadcast = false) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." << std::endl; + } + return true; + } + + this->initialize(problem_size, tensor_C_row_broadcast); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + problem_size, + tensor_A.device_ref(), + tensor_B.device_ref(), + tensor_C.device_ref(), + tensor_D.device_ref(), + tensor_E_reordered.device_ref(), + {alpha, beta}, + split_k_slices + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + // This failure is likely due to insufficient device capabilities. Waive the test. 
+ if (status != cutlass::Status::kSuccess) { + return true; + } + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + if (!passed) { + std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllSparseGemm(bool tensor_C_row_broadcast = false) { + bool passed = true; + + int const kMinimumOperandElementSize = + std::min( + int(cutlass::sizeof_bits::value), + int(cutlass::sizeof_bits::value)); + + // M dimension has to be multiple of 32 (sparse float) or 16 (sparse int) + // because of the reordering of operand E + int const kAlignmentM = std::max(((sizeof(typename Gemm::ElementE) == 2) ? 32 : 16), + kMinimumOperandElementSize); + + int const kAlignmentN = 128 / kMinimumOperandElementSize; + + int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM}; + + int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN}; + + int problem_size_k[] = {Gemm::ThreadblockShape::kK, + Gemm::ThreadblockShape::kK * (Gemm::kStages + 1)}; + + int split_k_slices[] = { + 1, 2, 3 + }; + + double problem_alpha[] = { + 1 + }; + + double problem_beta[] = { + 2.0 + }; + + SparseTestbed testbed; + + using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute; + + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (int split_k : split_k_slices) { + + if (!Gemm::kSplitKSerial && split_k > 1) { + continue; + } + + if (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k) { + continue; + } + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + cutlass::gemm::GemmCoord problem_size(m, n, k); + + passed = testbed.run( + problem_size, + split_k, + 
cutlass::from_real(alpha), + cutlass::from_real(beta), + tensor_C_row_broadcast + ); + + if (!passed) { + return false; + } + } + } + } + } + } + } + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_splitk.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_splitk.h new file mode 100644 index 0000000000000000000000000000000000000000..73dda7e383f2aa44faac265186770e6aa6a2df66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_splitk.h @@ -0,0 +1,218 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "testbed.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedSplitK : public Testbed { + + using Base = Testbed; + + using ElementCompute = typename Base::ElementCompute; + + // + // Methods + // + + TestbedSplitK( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + Base(init_A_, init_B_, init_C_, seed_) { } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmCoord problem_size, + int split_k_slices, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." << std::endl; + } + return true; + } + + this->initialize(problem_size); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + problem_size, + this->tensor_A.device_ref(), + this->tensor_B.device_ref(), + this->tensor_C.device_ref(), + this->tensor_D.device_ref(), + {alpha, beta}, + split_k_slices + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess); + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess); + + // + // Verify + // + + return this->verify(problem_size, alpha, beta); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +bool TestAllGemmSplitK() { + bool passed = true; + + 
cutlass::gemm::GemmCoord problem_sizes[] = { + {8, 8, 2048}, + {8, 8, 2056}, + {264, 72, 520}, + {264, 520, 120}, + {264, 520, 264} + }; + + int split_k_slices[] = { + 1, 2, 4, 5, 7 + }; + + double problem_alpha[] = { + 0.5 + }; + + double problem_beta[] = { + 2.0 + }; + + using Testbed = TestbedSplitK; + using ElementCompute = typename Testbed::ElementCompute; + + Testbed testbed; + + for (auto problem_size : problem_sizes) { + for (int split_k_count : split_k_slices) { + for (double alpha : problem_alpha) { + for (double beta : problem_beta) { + + passed = testbed.run( + problem_size, + split_k_count, + ElementCompute(alpha), + ElementCompute(beta) + ); + + if (!passed) { + std::cout << "Failed on size " << problem_size << " with split_k_count " << split_k_count << std::endl; + return false; + } + } + } + } + } + + EXPECT_TRUE(passed); + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_symm_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_symm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..1050a2edcc40a86f7742cb3cf158babadedeadd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_symm_universal.h @@ -0,0 +1,592 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide Symm update interface + +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/error_metrics.h" +#include "cutlass/util/reference/host/symm.h" +#include "cutlass/util/reference/host/symm_complex.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedSymmUniversal { + + using ElementA = typename Symm::ElementA; + using ElementB = typename Symm::ElementB; + using ElementC = typename Symm::ElementC; + using ElementAccumulator = typename Symm::ElementAccumulator; + using ElementCompute = typename Symm::SymmKernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + + // + // Methods + // + + TestbedSymmUniversal( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + 
cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int mantissa_in_bits) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + + EXPECT_TRUE(false) << "Input distribution not implemented"; + return false; + } + + return true; + } + + + /// Helper to initialize a tensor view + template + bool initialize_symmetric_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int mantissa_in_bits) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillSymmetricRandomUniform( + view, seed, Symm::kFillModeA, scope_max, scope_min, mantissa_in_bits); + } + else if (dist_kind == 
cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillSymmetricRandomGaussian( + view, seed, Symm::kFillModeA, 0, 0.5, mantissa_in_bits); + } + else { + + EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; + return false; + } + + return true; + } + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the Symm workspace + // + + if (Symm::kSideModeA == cutlass::SideMode::kLeft) { + tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m())); + } + else if (Symm::kSideModeA == cutlass::SideMode::kRight) { + tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n())); + } + + tensor_B.resize(problem_size.mn()); + tensor_C.resize(problem_size.mn()); + tensor_D.resize(problem_size.mn()); + reference_D.resize(problem_size.mn(), false); + + EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits::bits)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits::bits)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits::bits)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ tensor_A.host_view().at({0, 0}) = typename Symm::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Symm::ElementB(1); + tensor_C.host_view().at({0, 0}) = typename Symm::ElementC(1); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + if (tensor_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + + if (reference_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); + + bool passed = l2_norm < cutlass::MantissaInBits::error; + + return passed; + } + + /// Verifies the result is a Symm + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + // + // Verify + // + + using HostReference = typename cutlass::platform::conditional< + (cutlass::platform::is_same + >::value || + cutlass::platform::is_same + >::value + ), + cutlass::reference::host::SymmComplex< + typename Symm::ElementA, typename Symm::LayoutA, + Symm::kSideModeA, Symm::kFillModeA, + typename Symm::ElementB, typename Symm::LayoutB, + typename Symm::ElementC, typename Symm::LayoutC, + ElementCompute, + ElementAccumulator, + Symm::kBlasMode>, + cutlass::reference::host::Symm< + typename Symm::ElementA, typename Symm::LayoutA, + Symm::kSideModeA, 
Symm::kFillModeA, + typename Symm::ElementB, typename Symm::LayoutB, + typename Symm::ElementC, typename Symm::LayoutC, + ElementCompute, + ElementAccumulator> + >::type; + + + HostReference reference_symm; + + reference_symm( + problem_size, + alpha, + tensor_A.host_ref(), + tensor_B.host_ref(), + beta, + tensor_C.host_ref(), + reference_D.host_ref(), + ElementAccumulator(0) + ); + + return compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Symm::SymmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmUniversalMode mode, + cutlass::gemm::GemmCoord problem_size, + int batch_count = 1, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + +#if 0 + std::cout << "[TestbedSymmUniversal::run()] problem(m, n, k): " << problem_size + << " alpha: " << ElementCompute(alpha) + << " beta: " << ElementCompute(beta) << std::endl; +#endif + + this->initialize(problem_size); + + // + // Initialize the Symm operator + // + + int batch_stride_A; + if (Symm::kSideModeA == cutlass::SideMode::kLeft) + batch_stride_A = problem_size.m()*problem_size.m(); + if (Symm::kSideModeA == cutlass::SideMode::kRight) + batch_stride_A = problem_size.n()*problem_size.n(); + + typename Symm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data(), + tensor_D.device_data(), + batch_stride_A, + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + tensor_B.layout().stride(0), + tensor_C.layout().stride(0), + tensor_D.layout().stride(0) + }; + + Symm symm_op; + + size_t workspace_size = Symm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = symm_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the Symm + // + + status = symm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + //if (true) { + if (!passed) { + std::stringstream fname; + + fname << "error_" + << (Symm::kBlasMode == cutlass::BlasMode::kSymmetric ? "symm_" : "hemm_" ) + << "device_" + << "fill_mode_a_" + << (Symm::kSideModeA == cutlass::SideMode::kLeft ? "leftside_" : + (Symm::kSideModeA == cutlass::SideMode::kRight ? "rightside_" : "invalid_")) + << (Symm::kFillModeA == cutlass::FillMode::kLower ? "lower_" : + (Symm::kFillModeA == cutlass::FillMode::kUpper ? 
"upper_" : "invalid_")) + << "mnk_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Symm::ThreadblockShape::kM << "x" + << Symm::ThreadblockShape::kN << "x" + << Symm::ThreadblockShape::kK << "_" + << Symm::WarpShape::kM << "x" + << Symm::WarpShape::kN << "x" + << Symm::WarpShape::kK << ".txt"; + + std::cout << fname.str() << std::endl; + + std::ofstream results(fname.str()); + + results << problem_size << std::endl; + + results + << "alpha: " << ElementCompute(alpha) << "\n" + << "beta: " << ElementCompute(beta) << "\n" + << "\nA:\n" << tensor_A.host_view() << "\n" + << "\nB:\n" << tensor_B.host_view() << "\n" + << "\nC:\n" << tensor_C.host_view() << "\n" + << "\nD reference:\n" << reference_D.host_view() << "\n" + << "\nD computed:\n" << tensor_D.host_view() << "\n"; + + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +template +bool TestsymmUniversal( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmUniversalMode mode, + int batch_count, + double alpha = 1.0, + double beta = 2.0) { + + bool passed = true; + + TestbedSymmUniversal testbed; + + using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + return passed; +} + +template +bool TestAllSymmUniversal() { + bool passed = true; + + + int const kMinimumOperandElementSize = int(cutlass::sizeof_bits::value); + + int const kAlignment = cutlass::platform::is_same< + typename Symm::OperatorClass, + cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; + + // int8_t gemm alignment constraints + int const kAlignmentM = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value ? 
4 : kAlignment; + + int const kAlignmentN = kAlignmentM; + + int const kAlignmentK = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value + ? 4 : kAlignment; + + cutlass::gemm::GemmUniversalMode modes[] = { + cutlass::gemm::GemmUniversalMode::kGemm, + }; + + int problem_size_m[] = { + kAlignmentK, + Symm::ThreadblockShape::kK * Symm::kStages - kAlignmentK, + Symm::ThreadblockShape::kK * Symm::kStages * 3 - kAlignmentK + }; + + int problem_size_n[] = { + kAlignmentN, 512 - 2*kAlignmentN + }; + + int batch_counts[] = { // may be interpretted as batch count or split-K slices + 1 // Just running one batch for now (removing 2, 3, 5, 7) + }; + + double problem_alpha[] = { + 1.0, 3.0 + }; + + double problem_beta[] = { + 0, 2.0 + }; + + + using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute; + + for (cutlass::gemm::GemmUniversalMode mode : modes) { + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int batch_count : batch_counts) { + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + int k = 0; + if (Symm::kSideModeA == cutlass::SideMode::kLeft) + k = m; + else if (Symm::kSideModeA == cutlass::SideMode::kRight) + k = n; + + if (mode == cutlass::gemm::GemmUniversalMode::kGemm || + mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { + + #if 0 + // skip very small K problems + if (k / batch_count < 2 * Symm::ThreadblockShape::kK) { + continue; + } + #endif + } + + cutlass::gemm::GemmCoord problem_size(m, n, k); + + TestbedSymmUniversal testbed; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + } + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + 
+///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_trmm_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_trmm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..422d58b9a0821710d0aac1f03e9692b66cc8e322 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_trmm_universal.h @@ -0,0 +1,606 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/error_metrics.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/trmm_complex.h" +#include "cutlass/core_io.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedTrmmUniversal { + + using ElementA = typename Trmm::ElementA; + using ElementB = typename Trmm::ElementB; + using ElementC = typename Trmm::ElementC; + using ElementAccumulator = typename Trmm::ElementAccumulator; + using ElementCompute = typename Trmm::TrmmKernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + 
cutlass::Distribution::Kind init_D; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + + // + // Methods + // + + TestbedTrmmUniversal( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_D_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_D(init_D_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int mantissa_in_bits) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + + /// Helper to initialize a tensor view + template + bool initialize_symmetric_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int 
mantissa_in_bits) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillSymmetricRandomUniform( + view, seed, Trmm::kFillMode, scope_max, scope_min, mantissa_in_bits); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillSymmetricRandomGaussian( + view, seed, Trmm::kFillMode, 0, 0.5, mantissa_in_bits); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Helper to initialize a tensor view (pad diagonal fill with zeros for up to alignment on wrong side of diagonal) + template + bool initialize_pad_diagonal_tensor( + cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed, + int alignment) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillPadDiagonalRandomUniform( + view, seed, Trmm::kFillMode, scope_max, scope_min, 0, alignment); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + EXPECT_TRUE(false) << "Gaussian distribution for pad diagonal not implemented"; + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void 
initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the TRMM workspace + // + + if (Trmm::kSideMode == cutlass::SideMode::kLeft) { + tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m())); + } + else if (Trmm::kSideMode == cutlass::SideMode::kRight) { + tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n())); + } + + tensor_B.resize(problem_size.mn()); + tensor_D.resize(problem_size.mn()); + reference_D.resize(problem_size.mn(), false); + + //EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2017)); + //EXPECT_TRUE(initialize_pad_diagonal_tensor(tensor_A.host_view(), init_A, seed + 2017, Trmm::kAlignmentA)); + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2017, cutlass::MantissaInBits::bits)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2019, cutlass::MantissaInBits::bits)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ tensor_A.host_view().at({0, 0}) = typename Trmm::ElementA(1); + tensor_B.host_view().at({0, 0}) = typename Trmm::ElementB(1); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_D.host_view()); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_D.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + + if (tensor_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + + if (reference_D.size() > 1) + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); + + bool passed = l2_norm < cutlass::MantissaInBits::error; + + return passed; + } + + /// Verifies the result is a TRMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha) { + + // + // Verify + // + + using HostReference = typename cutlass::platform::conditional< + (cutlass::platform::is_same + >::value || + cutlass::platform::is_same + >::value + ), + cutlass::reference::host::TrmmComplex< + typename Trmm::ElementA, typename Trmm::LayoutA, + Trmm::kTransformA, + Trmm::kSideMode, Trmm::kFillMode, Trmm::kDiagType, + typename Trmm::ElementB, typename Trmm::LayoutB, + Trmm::kTransformB, + typename Trmm::ElementC, typename Trmm::LayoutC, + ElementCompute, + ElementAccumulator>, + cutlass::reference::host::Trmm< + typename Trmm::ElementA, typename Trmm::LayoutA, + Trmm::kSideMode, Trmm::kFillMode, Trmm::kDiagType, + typename Trmm::ElementB, typename Trmm::LayoutB, + typename Trmm::ElementC, typename Trmm::LayoutC, + ElementCompute, + ElementAccumulator> + 
>::type; + + + HostReference reference_trmm; + + reference_trmm( + problem_size, + alpha, + tensor_A.host_ref(), + tensor_B.host_ref(), + reference_D.host_ref(), + ElementAccumulator(0) + ); + + return compare_reference(problem_size, alpha); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Trmm::TrmmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmUniversalMode mode, + cutlass::gemm::GemmCoord problem_size, + int batch_count = 1, + ElementCompute alpha = ElementCompute(1)) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + +#if 0 + std::cout << "[TestbedTrmmUniversal::run()] problem(m, n, k): " << problem_size + << " alpha: " << ElementCompute(alpha) << std::endl; +#endif + + this->initialize(problem_size); + + // + // Initialize the TRMM operator + // + + int batch_stride_A; + if (Trmm::kSideMode == cutlass::SideMode::kLeft) + batch_stride_A = problem_size.m()*problem_size.m(); + if (Trmm::kSideMode == cutlass::SideMode::kRight) + batch_stride_A = problem_size.n()*problem_size.n(); + + typename Trmm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha}, + tensor_A.device_data(), + tensor_B.device_data(), + tensor_D.device_data(), + batch_stride_A, + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + tensor_B.layout().stride(0), + tensor_D.layout().stride(0) + }; + + Trmm trmm_op; + + size_t workspace_size = Trmm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = trmm_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the TRMM + // + + status = trmm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + bool passed = this->verify(problem_size, alpha); + + if (!passed) { + std::stringstream fname; + + fname << "error_Trmm_device_" + << "fill_mode_" + << (Trmm::kFillMode == cutlass::FillMode::kLower ? "lower_" : + (Trmm::kFillMode == cutlass::FillMode::kUpper ? "upper_" : "invalid_")) + << "side_mode_" + << (Trmm::kSideMode == cutlass::SideMode::kLeft ? "left_" : + (Trmm::kSideMode == cutlass::SideMode::kRight ? 
"right_" : "invalid_")) + << "mnk_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Trmm::ThreadblockShape::kM << "x" + << Trmm::ThreadblockShape::kN << "x" + << Trmm::ThreadblockShape::kK << "_" + << Trmm::WarpShape::kM << "x" + << Trmm::WarpShape::kN << "x" + << Trmm::WarpShape::kK << ".txt"; + + std::cout << fname.str() << std::endl; + + std::ofstream results(fname.str()); + + results << problem_size << std::endl; + + results + << "\nA:\n" << tensor_A.host_view() << "\n" + << "\nB:\n" << tensor_B.host_view() << "\n" + << "\nD reference:\n" << reference_D.host_view() << "\n" + << "\nD computed:\n" << tensor_D.host_view() << "\n"; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +template +bool TestTrmmUniversal( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmUniversalMode mode, + int batch_count, + double alpha = 1.0) { + + bool passed = true; + + TestbedTrmmUniversal testbed; + + using ElementCompute = typename Trmm::EpilogueOutputOp::ElementCompute; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha) + ); + + return passed; +} + +template +bool TestAllTrmmUniversal() { + bool passed = true; + + int const kMinimumOperandElementSize = int(cutlass::sizeof_bits::value); + + int const kAlignment = cutlass::platform::is_same< + typename Trmm::OperatorClass, + cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; + + // int8_t gemm alignment constraints + int const kAlignmentM = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value ? 4 : kAlignment; + + int const kAlignmentN = kAlignmentM; + + int const kAlignmentK = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value + ? 
4 : kAlignment; + + cutlass::gemm::GemmUniversalMode modes[] = { + cutlass::gemm::GemmUniversalMode::kGemm, + }; + + int problem_size_m[] = { + kAlignmentK, + Trmm::ThreadblockShape::kK * Trmm::kStages - kAlignmentK, + Trmm::ThreadblockShape::kK * Trmm::kStages * 3 - kAlignmentK + }; + + int problem_size_n[] = { + kAlignmentN, 512 - 2*kAlignmentN + }; + + int batch_counts[] = { // may be interpretted as batch count or split-K slices + 1 // Just running one batch for now (removing 2, 3, 5, 7) + }; + + double problem_alpha[] = { + 1.0, 2.0 + }; + + using ElementCompute = typename Trmm::EpilogueOutputOp::ElementCompute; + + for (cutlass::gemm::GemmUniversalMode mode : modes) { + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int batch_count : batch_counts) { + for (auto alpha : problem_alpha) { + + int k = 0; + if (Trmm::kSideMode == cutlass::SideMode::kLeft) + k = m; + else if (Trmm::kSideMode == cutlass::SideMode::kRight) + k = n; + + if (mode == cutlass::gemm::GemmUniversalMode::kGemm || + mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { + +#if 0 + // skip very small K problems + if (k / batch_count < 2 * Trmm::ThreadblockShape::kK) { + continue; + } +#endif + } + + cutlass::gemm::GemmCoord problem_size(m, n, k); + + TestbedTrmmUniversal testbed; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_universal.h new file mode 100644 
index 0000000000000000000000000000000000000000..a849b593b9e9ad6ad8f7b226b4f484f6d64a5fa4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_universal.h @@ -0,0 +1,546 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/gemm_complex.h" + +#include "testbed_utils.h" + +namespace test { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct TestbedUniversal { + + using ElementA = typename Gemm::ElementA; + using ElementB = typename Gemm::ElementB; + using ElementC = typename Gemm::ElementC; + using ElementAccumulator = typename Gemm::ElementAccumulator; + using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; + + /// Initialization + cutlass::Distribution::Kind init_A; + cutlass::Distribution::Kind init_B; + cutlass::Distribution::Kind init_C; + uint64_t seed; + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D; + cutlass::HostTensor reference_D; + + // + // Methods + // + + TestbedUniversal( + cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, + uint64_t seed_ = 2080 + ): + init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } + + /// Helper to initialize a tensor view + template + bool initialize_tensor( + 
cutlass::TensorView view, + cutlass::Distribution::Kind dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + + double scope_max, scope_min; + int bits_input = cutlass::sizeof_bits::value; + int bits_output = cutlass::sizeof_bits::value; + + if (bits_input == 1) { + scope_max = 2; + scope_min = 0; + } else if (bits_input <= 8) { + scope_max = 2; + scope_min = -2; + } else if (bits_output == 16) { + scope_max = 5; + scope_min = -5; + } else { + scope_max = 8; + scope_min = -8; + } + + cutlass::reference::host::TensorFillRandomUniform( + view, seed, scope_max, scope_min, 0); + } + else if (dist_kind == cutlass::Distribution::Identity) { + + cutlass::reference::host::TensorFillIdentity(view); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); + } + else if (dist_kind == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential( + view.data(), view.capacity()); + } + else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Initializes data structures + void initialize(cutlass::gemm::GemmCoord problem_size) { + // + // Allocate the GEMM workspace + // + + tensor_A.resize(problem_size.mk()); + tensor_B.resize(problem_size.kn()); + tensor_C.resize(problem_size.mn()); + tensor_D.resize(problem_size.mn()); + reference_D.resize(problem_size.mn(), false); + + EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); + EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); + EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); + + // It is possible to randomly initialize to all zeros, so override this with non-zeros + // in the upper left corner of each operand. 
+ cutlass::Coord<2> origin(0); + tensor_A.host_view().at(origin) = typename Gemm::ElementA(1); + tensor_B.host_view().at(origin) = typename Gemm::ElementB(1); + tensor_C.host_view().at(origin) = typename Gemm::ElementC(1); + + cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D.sync_device(); + } + + /// Compares computed reference with device reference and outputs to a file if incorrect + bool compare_reference( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + tensor_D.sync_host(); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); + + EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view()); + + EXPECT_TRUE(passed) << " mismatched reference"; + + if (!passed) { + + /* + std::stringstream fname; + + fname << "error_Gemm_device_" + << problem_size.m() << "x" + << problem_size.n() << "x" + << problem_size.k() << "_" + << Gemm::ThreadblockShape::kM << "x" + << Gemm::ThreadblockShape::kN << "x" + << Gemm::ThreadblockShape::kK << "_" + << Gemm::WarpShape::kM << "x" + << Gemm::WarpShape::kN << "x" + << Gemm::WarpShape::kK << ".txt"; + + std::ofstream file(fname.str()); + */ + + std::ofstream file("testbed_universal_errors.txt"); + + file + << "problem: " << problem_size + << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; + + file + << "A =\n" << tensor_A.host_view() + << "\nB =\n" << tensor_B.host_view() + << "\nC =\n" << tensor_C.host_view() + << "\n\nReference =\n" << reference_D.host_view() + << "\nComputed =\n" << 
tensor_D.host_view(); + } + + return passed; + } + + /// Verifies the result is a GEMM + bool verify( + cutlass::gemm::GemmCoord problem_size, + ElementCompute alpha, + ElementCompute beta) { + + // + // Verify + // + + cutlass::reference::host::GemmComplex< + typename Gemm::ElementA, typename Gemm::LayoutA, + typename Gemm::ElementB, typename Gemm::LayoutB, + typename Gemm::ElementC, typename Gemm::LayoutC, + ElementCompute, ElementAccumulator + >( + problem_size, + alpha, + tensor_A.host_ref(), + Gemm::kTransformA, + tensor_B.host_ref(), + Gemm::kTransformB, + beta, + tensor_C.host_ref(), + reference_D.host_ref(), + ElementAccumulator(0) + ); + + if (Relu) { + for (int i = 0; i < problem_size.m(); ++i) { + for (int j = 0; j < problem_size.n(); ++j) { + reference_D.at(cutlass::MatrixCoord(i, j)) = + ((ElementCompute)reference_D.at(cutlass::MatrixCoord(i, j)) < (ElementCompute)0) + ? (typename Gemm::ElementC)0 + : reference_D.at(cutlass::MatrixCoord(i, j)); + } + } + } + + return compare_reference(problem_size, alpha, beta); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage)); + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.sharedMemPerBlockOptin < smem_size) { + return false; + } + + return true; + } + + /// Executes one test + bool run( + cutlass::gemm::GemmUniversalMode mode, + cutlass::gemm::GemmCoord problem_size, + int batch_count = 1, + ElementCompute alpha = ElementCompute(1), + ElementCompute beta = ElementCompute(0)) + { +/* + std::cout << "\n-----------------------\n"; + std::cout << "mode: " << (int) mode << "\n"; + std::cout << "problem size: " << problem_size << "\n"; + std::cout << "batch_count: " << batch_count << "\n"; + std::cout << "alpha: " << alpha << "\n"; + std::cout << "beta: " << beta << "\n"; + std::cout << "-----------------------\n\n"; +*/ + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + + this->initialize(problem_size); + + // + // Initialize the GEMM operator + // + + typename Gemm::Arguments arguments{ + mode, + problem_size, + batch_count, + {alpha, beta}, + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data(), + tensor_D.device_data(), + problem_size.m() * problem_size.k(), + problem_size.n() * problem_size.k(), + problem_size.m() * problem_size.n(), + problem_size.m() * problem_size.n(), + tensor_A.layout().stride(0), + tensor_B.layout().stride(0), + tensor_C.layout().stride(0), + tensor_D.layout().stride(0) + }; + + Gemm gemm_op; + + size_t workspace_size = Gemm::get_workspace_size(arguments); + + cutlass::device_memory::allocation workspace(workspace_size); + + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Run the GEMM + // + + status = gemm_op(); + + EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); + + // + // Verify + // + + bool passed = this->verify(problem_size, alpha, beta); + + if (!passed) { + std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +template +bool TestGemmUniversal( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmUniversalMode mode, + int batch_count, + double alpha = 1.0, + double beta = 2.0) { + + bool passed = true; + + TestbedUniversal testbed; + + using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + return passed; +} + +template +bool TestAllGemmUniversal() { + bool passed = true; + + + int const kMinimumOperandElementSize = + std::min( + int(cutlass::sizeof_bits::value), + 
int(cutlass::sizeof_bits::value)); + + int const kAlignment = cutlass::platform::is_same< + typename Gemm::OperatorClass, + cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; + + // int8_t gemm alignment constraints + int const kAlignmentM = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value ? 4 : kAlignment; + + int const kAlignmentN = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value ? 4 : kAlignment; + + int const kAlignmentK = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + (cutlass::platform::is_same::value || + cutlass::platform::is_same::value) ? 4 : kAlignment; + + + + cutlass::gemm::GemmUniversalMode modes[] = { + cutlass::gemm::GemmUniversalMode::kGemm, + }; + + int problem_size_m[] = { + kAlignmentM, 512 - 3*kAlignmentM + }; + + int problem_size_n[] = { + kAlignmentN, 512 - 2*kAlignmentN + }; + + int problem_size_k[] = { + kAlignmentK, + Gemm::ThreadblockShape::kK * Gemm::kStages - kAlignmentK, + Gemm::ThreadblockShape::kK * Gemm::kStages * 3 - kAlignmentK + }; + + int batch_counts[] = { // may be interpretted as batch count or split-K slices + 1, 2, 3, 5, 7 + }; + + double problem_alpha[] = { + 1 + }; + + double problem_beta[] = { + 2.0 + }; + + + using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute; + + for (cutlass::gemm::GemmUniversalMode mode : modes) { + for (int m : problem_size_m) { + for (int n : problem_size_n) { + for (int k : problem_size_k) { + for (int batch_count : batch_counts) { + + for (auto alpha : problem_alpha) { + for (auto beta : problem_beta) { + + if (mode == cutlass::gemm::GemmUniversalMode::kGemm || + mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { + + // skip very small K problems + if (k / batch_count < 2 * Gemm::ThreadblockShape::kK) { + continue; + } + } + + 
cutlass::gemm::GemmCoord problem_size(m, n, k); + + TestbedUniversal testbed; + + passed = testbed.run( + mode, + problem_size, + batch_count, + cutlass::from_real(alpha), + cutlass::from_real(beta) + ); + + if (!passed) { + return false; + } + } + } + } + } + } + } + } + + /* + // large problem with high coverage + for (int split_k_slices = 1; split_k_slices <= 3; ++split_k_slices) { + TestbedUniversal testbed; + + cutlass::gemm::GemmCoord problem_size(72, 56, 8192); + + passed = testbed.run( + cutlass::gemm::GemmUniversalMode::kGemm, + problem_size, + split_k_slices, + cutlass::from_real(1.0), + cutlass::from_real(2.0) + ); + + if (!passed) { + break; + } + } + */ + + return passed; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_utils.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..e47ecdab9a04a3875cbf5e3d136da22e0b537026 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/testbed_utils.h @@ -0,0 +1,53 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +inline char const *to_string(cutlass::Status status) { + + switch (status) { + case cutlass::Status::kSuccess: return "kSuccess"; + case cutlass::Status::kErrorMisalignedOperand: return "kErrorMisalignedOperand"; + case cutlass::Status::kErrorInvalidLayout: return "kErrorInvalidLayout"; + case cutlass::Status::kErrorInvalidProblem: return "kErrorInvalidProblem"; + case cutlass::Status::kErrorNotSupported: return "kErrorNotSupported"; + case cutlass::Status::kErrorWorkspaceNull: return "kErrorWorkspaceNull"; + case cutlass::Status::kErrorInternal: return "kErrorInternal"; + case cutlass::Status::kInvalid: return "kInvalid"; + default: break; + } + return "invalid"; +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf32n_cf32n_cf32t_tensor_op_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf32n_cf32n_cf32t_tensor_op_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..1e314022d5bb3007de0c0cc274de9d2c97d25563 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf32n_cf32n_cf32t_tensor_op_f32_sm80.cu @@ -0,0 +1,305 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_l_nu_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_u_nu_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = 
cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_rs_u_nu_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} 
+ +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_l_un_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_u_un_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_rs_u_un_tensor_op_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf32n_cf32n_cf32t_tensor_op_fast_f32_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf32n_cf32n_cf32t_tensor_op_fast_f32_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..8dc41a441ea100d8ad0f47e1fd63976ee770caf8 
--- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf32n_cf32n_cf32t_tensor_op_fast_f32_sm80.cu @@ -0,0 +1,305 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_l_nu_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_u_nu_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_rs_u_nu_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + 
cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_l_un_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_ls_u_un_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf32n_cf32t_cf32t_rs_u_un_tensor_op_fast_f32, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplexFastF32, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64_cf64_cf64_tensor_op_f64_sm90.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64_cf64_cf64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba8084d3ac957f1c35059abc5b382b2e807c74c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64_cf64_cf64_tensor_op_f64_sm90.cu @@ -0,0 +1,136 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Trmm_cf64n_cf64n_cf64t_ls_u_nu_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Trmm_cf64h_cf64n_cf64t_ls_u_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64n_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64n_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..b26a8d2a12854ff846b683cff56e87d3a814ee19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64n_cf64n_cf64t_tensor_op_f64_gaussian_sm80.cu @@ -0,0 +1,137 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64n_cf64n_cf64t_ls_u_nu_tensor_op_f64_gaussian, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64h_cf64n_cf64t_ls_u_nu_tensor_op_f64_gaussian, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddGaussianComplex, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64n_cf64n_cf64t_tensor_op_f64_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64n_cf64n_cf64t_tensor_op_f64_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..2db3d2cce89f64382600ad80a889c00668a07dc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_cf64n_cf64n_cf64t_tensor_op_f64_sm80.cu @@ -0,0 +1,301 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64n_cf64n_cf64t_ls_l_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64n_cf64n_cf64t_ls_u_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = 
cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64h_cf64n_cf64t_ls_u_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate + >; + + 
EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64n_cf64n_cf64t_ls_l_un_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64n_cf64n_cf64t_ls_u_un_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + 
ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kNone + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_cf64h_cf64n_cf64t_ls_u_un_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = cutlass::complex; + using ElementAccumulator = cutlass::complex; + + using Trmm = cutlass::gemm::device::Trmm< + cutlass::complex, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + cutlass::complex, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddComplex, + cutlass::ComplexTransform::kConjugate + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..d8ad244fd89976a088d59cad5e96678fb38086e8 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu @@ -0,0 +1,500 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Trmm_{ElementA}{LayoutA}_{ElementB}{LayoutB}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _{DiagType}_tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_l_nu_tensor_op_fast_f32_align1_align1, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 
1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_l_nu_tensor_op_fast_f32_align1_align4, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_l_nu_tensor_op_fast_f32_align1_align4, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 
128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_l_nu_tensor_op_fast_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_l_nu_tensor_op_fast_f32_align1_align4, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_l_nu_tensor_op_fast_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + 
cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = 
cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Trmm_f32n_f32t_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..a9ed92181368b2452f746f0e842c18a6ffa4bb51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_rs_sm80.cu @@ -0,0 +1,252 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Trmm_{ElementA}{LayoutA}_{ElementB}{LayoutB}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _{DiagType}_tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_rs_u_nu_tensor_op_fast_f32_align1_align1, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Trmm_f32n_f32t_f32t_rs_u_nu_tensor_op_fast_f32_align1_align1, 128x64x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_rs_l_nu_tensor_op_fast_f32_align1_align1, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_rs_u_nu_tensor_op_fast_f32_align1_align4, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, 
cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_rs_u_nu_tensor_op_fast_f32_align1_align4, 128x64x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32n_f32t_f32t_rs_l_nu_tensor_op_fast_f32_align1_align4, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, 
cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32t_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32t_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..56c639639ceb4c6c456dedef282cb0d93ad00386 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32t_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu @@ -0,0 +1,449 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_l_nu_tensor_op_fast_f32_align1_align1, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_l_nu_tensor_op_fast_f32_align1_align4, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + 
float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_l_nu_tensor_op_fast_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_l_nu_tensor_op_fast_f32_align1_align4, 
128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_l_nu_tensor_op_fast_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_u_nu_tensor_op_fast_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_u_nu_tensor_op_fast_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_u_nu_tensor_op_fast_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_u_nu_tensor_op_fast_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32n_ls_u_nu_tensor_op_fast_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32t_f32n_f32t_tensor_op_fast_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32t_f32n_f32t_tensor_op_fast_f32_ls_sm80.cu new file mode 100644 index 
0000000000000000000000000000000000000000..9217ebd4215c28ef786451c82caa6ce66bba7d01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f32t_f32n_f32t_tensor_op_fast_f32_ls_sm80.cu @@ -0,0 +1,458 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Trmm_{ElementA}{LayoutA}_{ElementB}{LayoutB}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _{DiagType}_tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_l_un_tensor_op_fast_f32_align1_align1, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + 
cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 1, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_l_un_tensor_op_fast_f32_align1_align4, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_l_un_tensor_op_fast_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using 
ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_l_un_tensor_op_fast_f32_align1_align4, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_l_nu_tensor_op_fast_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_u_un_tensor_op_fast_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 
8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f32t_f32n_f32t_ls_u_nu_tensor_op_fast_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAddFastF32 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64_f64_f64_tensor_op_f64_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64_f64_f64_tensor_op_f64_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..ffcefafefc5988952587eeb3d2e727e6b2258f17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64_f64_f64_tensor_op_f64_sm90.cu @@ -0,0 +1,126 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_Device_Trmm_f64t_f64t_f64n_rs_l_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + 
cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm90, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..0dd9064fb4160db78e37f9f3db70eb557a46a5ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_ls_sm80.cu @@ -0,0 +1,414 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + 
cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + 
cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + 
cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..f00e50ca4119f6621a10a17f6c781b528dbad496 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_rs_sm80.cu @@ -0,0 +1,415 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + 
ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, 
+ cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_u_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_u_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + 
cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_u_nu_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_u_nu_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + 
cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64n_f64n_f64t_rs_u_nu_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64t_f64t_f64n_tensor_op_f64_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64t_f64t_f64n_tensor_op_f64_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..bb4443d02b9204e1c44fec1d36a8a8abfa60bb6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_f64t_f64t_f64n_tensor_op_f64_ls_sm80.cu @@ -0,0 +1,414 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_l_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_l_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + 
cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_l_nu_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_l_nu_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + 
cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_l_nu_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_u_nu_tensor_op_f64, 32x32x16_16x16x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 16, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_u_nu_tensor_op_f64, 64x64x16_32x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_u_nu_tensor_op_f64, 128x64x16_64x32x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + 
cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<64, 32, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_u_nu_tensor_op_f64, 64x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_f64t_f64t_f64n_ls_u_nu_tensor_op_f64, 128x128x16_32x64x16) { + + using ElementOutput = double; + using ElementAccumulator = double; + + using Trmm = cutlass::gemm::device::Trmm< + double, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + double, + cutlass::layout::RowMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + 
cutlass::gemm::GemmShape<32, 64, 16>, + cutlass::gemm::GemmShape<8, 8, 4>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3 + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32n_tf32t_f32t_tensor_op_f32_rs_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32n_tf32t_f32t_tensor_op_f32_rs_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..723af641aad17a6558a61dd3cd2a221cde78a888 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32n_tf32t_f32t_tensor_op_f32_rs_sm80.cu @@ -0,0 +1,252 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Trmm_{ElementA}{LayoutA}_{ElementB}{LayoutB}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _{DiagType}_tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_Device_Trmm_tf32n_tf32t_f32t_rs_u_nu_tensor_op_f32_align1_align1, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32n_tf32t_f32t_rs_u_nu_tensor_op_f32_align1_align1, 128x64x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32n_tf32t_f32t_rs_l_nu_tensor_op_f32_align1_align1, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + 
cutlass::SideMode::kRight, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +TEST(SM80_Device_Trmm_tf32n_tf32t_f32t_rs_u_nu_tensor_op_f32_align1_align4, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32n_tf32t_f32t_rs_u_nu_tensor_op_f32_align1_align4, 128x64x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + 
cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 64, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32n_tf32t_f32t_rs_l_nu_tensor_op_f32_align1_align4, 64x128x32_32x64x32) { + +using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::ColumnMajor, + cutlass::SideMode::kRight, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, + float, cutlass::layout::RowMajor, + float, cutlass::layout::RowMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd +>; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32t_tf32n_f32n_tensor_op_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32t_tf32n_f32n_tensor_op_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..ebb427b8cf8ff58d12bcc94224a3135cb6d3b247 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32t_tf32n_f32n_tensor_op_f32_ls_sm80.cu 
@@ -0,0 +1,449 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_l_nu_tensor_op_f32_align1_align1, 64x128x32_32x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, + float, cutlass::layout::ColumnMajor, + float, cutlass::layout::ColumnMajor, + float, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 128, 32>, + cutlass::gemm::GemmShape<32, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + float, + 1, + float, + float + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, + 3, + 1, + 1, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_l_nu_tensor_op_f32_align1_align4, 128x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + 
cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_l_nu_tensor_op_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_l_nu_tensor_op_f32_align1_align4, 128x256x32_64x64x32) { + + using 
ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_l_nu_tensor_op_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_u_nu_tensor_op_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_u_nu_tensor_op_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_u_nu_tensor_op_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_u_nu_tensor_op_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + 
cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32n_ls_u_nu_tensor_op_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::ColumnMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32t_tf32n_f32t_tensor_op_f32_ls_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32t_tf32n_f32t_tensor_op_f32_ls_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba3f7f3864c77fa1fdd2dd46ff0c0c1e24c4bc72 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/device/trmm_tf32t_tf32n_f32t_tensor_op_f32_ls_sm80.cu @@ -0,0 +1,458 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Tests for device-wide TRMM interface + + +*/ + +#include + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/blas3.h" +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/trmm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +#include "testbed_trmm_universal.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +////////////////////////////////////////////Test name////////////////////////////////////////////////// +// +// SM80_Device_Trmm_{ElementA}{LayoutA}_{ElementB}{LayoutB}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ +// _{DiagType}_tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} +// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align1, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 1, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 
1, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align4, 64x64x32_32x32x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align4, 256x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, 
+ ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align4, 128x256x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_nu_tensor_op_f32_align1_align4, 256x128x32_64x64x32) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kLower, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 32>, + cutlass::gemm::GemmShape<64, 64, 32>, + 
cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 64x64x16_32x32x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<32, 32, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 10, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_un_tensor_op_f32_align1_align4, 128x128x16_64x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + 
cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 128, 16>, + cutlass::gemm::GemmShape<64, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 256x128x16_128x64x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 128, 16>, + cutlass::gemm::GemmShape<128, 64, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 4, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 128x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + 
cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<128, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 256x256x16_64x128x16) { + + using ElementOutput = float; + using ElementAccumulator = float; + + using Trmm = cutlass::gemm::device::Trmm< + float, + cutlass::layout::RowMajor, + cutlass::SideMode::kLeft, + cutlass::FillMode::kUpper, + cutlass::DiagType::kNonUnit, + float, + cutlass::layout::ColumnMajor, + ElementOutput, + cutlass::layout::RowMajor, + ElementAccumulator, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + cutlass::gemm::GemmShape<256, 256, 16>, + cutlass::gemm::GemmShape<64, 128, 16>, + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementAccumulator + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + 3, + 1, + 4, + false, + cutlass::arch::OpMultiplyAdd + >; + + EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal()); +} +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/kernel/batched_gemv.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/kernel/batched_gemv.cu new file mode 100644 index 0000000000000000000000000000000000000000..4e06485a1a21888778de0e31e4a3e6c00c2e1619 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/kernel/batched_gemv.cu @@ -0,0 +1,1082 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#include "testbed_gemv.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape 
= cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size, -0.5f); +} + +TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_beta_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size, 
4.5f, -0.5f); +} + +TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcr_alpha_beta_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f)); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const 
kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp16) +{ + 
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + 
cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + kBatchTileSize>(problem_size); +} + +///////////// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 
4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp32) +{ + 
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + 
cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; 
+ using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 
1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size, -0.5f); +} + +TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_beta_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size, 4.5f, -0.5f); +} + +TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_crc_alpha_beta_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f)); +} + +///////////// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const 
kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 
64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + 
cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + 
static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +/// + +TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 1; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 2; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_i8_i32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; + static int const kBatchTileSize = 8; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_i8_i32) +{ + 
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + int8_t, int32_t, int32_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size); +} + +TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size, -0.5f); +} + +TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_beta_fp32_fp32) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + float, float, float, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size, 4.5f, -0.5f); +} + +TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcc_alpha_beta_fp16_fp16) +{ + cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096); + + using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; + using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; + static int const kBatchTileSize = 64; + + test::gemm::kernel::batched_gemv_kernel_test< + ThreadBlockShape, + ThreadShape, + cutlass::half_t, 
float, cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::layout::ColumnMajor, + cutlass::layout::ColumnMajor, + kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f)); +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/kernel/testbed_gemv.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/kernel/testbed_gemv.h new file mode 100644 index 0000000000000000000000000000000000000000..dc551efac189b663fb8d6bab8814d73dab92d515 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/kernel/testbed_gemv.h @@ -0,0 +1,358 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/core_io.h" +#include "cutlass/numeric_types.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "cutlass/gemm/kernel/default_gemv.h" +#include "cutlass/gemm/kernel/gemv_batched_strided.h" + +namespace test { +namespace gemm { +namespace kernel { + +template +void batched_gemv_kernel_test(cutlass::gemm::BatchedGemmCoord problem_size, + ElementCD_ alpha = ElementCD_(1), + ElementCD_ beta = ElementCD_(0), + bool perf_test = false, + int perf_test_iter = 1) +{ + using ThreadBlockShape = ThreadBlockShape_; + using ThreadShape = ThreadShape_; + using ElementA = ElementAB_; + using LayoutA = LayoutA_; + using ElementB = ElementAB_; + using LayoutB = LayoutB_; + using ElementAccumulator = ElementCD_; + using ElementCD = ElementCD_; + using LayoutCD = LayoutCD_; + + using GemvKernel = cutlass::gemm::kernel::DefaultGemv; + + using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv; + using ThreadBlockSwizzle = typename 
GemvKernel::ThreadBlockSwizzle; + + if (DEBUG) + { + problem_size = cutlass::gemm::BatchedGemmCoord( + problem_size.m(), problem_size.n(), problem_size.k(), 1); + } + + // Create host tensors that will be the backing store for the batches + // Note that no device memory is initially allocated + cutlass::HostTensor matrix_A({problem_size.m(), problem_size.k()}, false); + cutlass::HostTensor matrix_B({problem_size.k(), problem_size.n()}, false); + cutlass::HostTensor matrix_C_computed({problem_size.m(), problem_size.n()}, false); + cutlass::HostTensor matrix_C_reference({problem_size.m(), problem_size.n()}, false); + + // Reserve memory for the batch of tensors + matrix_A.reserve(problem_size.m()*problem_size.k()*problem_size.batch()); + matrix_B.reserve(problem_size.n()*problem_size.k()*problem_size.batch()); + matrix_C_computed.reserve(problem_size.m()*problem_size.n()*problem_size.batch()); + matrix_C_reference.reserve(problem_size.m()*problem_size.n()*problem_size.batch(), false); + + // Fill eatch tensor batch + const int seed = 9876; + for (int b = 0; b < problem_size.batch(); b++) + { + if(DEBUG) + { + cutlass::reference::host::BlockFillSequential( + matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity()); + cutlass::reference::host::BlockFillSequential( + matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity()); + } + else + { + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(b*matrix_A.capacity()), + seed + 1660, + 8, + -8, + 0 + ); + + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(b*matrix_B.capacity()), + seed + 1880, + 8, + -8, + 0 + ); + } + + cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity())); + cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity())); + } + + matrix_A.sync_device(); + matrix_B.sync_device(); + matrix_C_computed.sync_device(); + + ThreadBlockSwizzle swizzle; + + 
cutlass::gemm::BatchedGemmCoord tiled_size{ThreadBlockShape::kM, + ThreadBlockShape::kN, + problem_size.k(), // no split-k + DEBUG ? 1 : THREAD_B }; + + cutlass::gemm::BatchedGemmCoord tiled_shape = swizzle.get_tiled_shape(problem_size, tiled_size); + + #if 0 + printf("tiled_size = %d %d %d %d\n", tiled_size.m(), tiled_size.n(), tiled_size.k(), tiled_size.batch()); + printf("tiled_shape = %d %d %d %d\n", tiled_shape.m(), tiled_shape.n(), tiled_shape.k(), tiled_shape.batch()); + #endif + + // No split-k + EXPECT_EQ(tiled_size.k(), problem_size.k()); + + dim3 grid = swizzle.get_grid_shape(tiled_shape); + dim3 block(tiled_size.n() / ThreadShape::kN, tiled_size.batch(), tiled_size.k() / problem_size.k()); + + // Some sanity checks + EXPECT_TRUE( block.x*block.y*block.z <= 1024 ); + EXPECT_TRUE( block.x <= 1024 ); + EXPECT_TRUE( block.y <= 1024 ); + EXPECT_TRUE( block.z <= 64 ); + + #if 0 + printf("grid dim = %d, %d, %d\n", grid.x, grid.y, grid.z); + printf("block dim = %d, %d, %d\n", block.x, block.y, block.z); + #endif + + cudaError_t result; + cudaEvent_t start_event, end_event; + + for (int iter = 0; iter < (perf_test ? 
(perf_test_iter+1) : 1); ++iter) + { + if (perf_test && iter == 1) + { + result = cudaEventCreate(&start_event); + EXPECT_EQ(result, cudaSuccess); + + result = cudaEventCreate(&end_event); + EXPECT_EQ(result, cudaSuccess); + + result = cudaEventRecord(start_event); + EXPECT_EQ(result, cudaSuccess); + } + + if (beta == ElementCD(0)) + { + if (alpha == ElementCD(1)) + { + cutlass::gemm::kernel::GemvBatchedStrided<<< grid, block >>>( + problem_size, + matrix_A.device_ref(), + matrix_A.capacity(), + matrix_B.device_ref(), + matrix_B.capacity(), + matrix_C_computed.device_ref(), + matrix_C_computed.capacity() + ); + } + else + { + cutlass::gemm::kernel::GemvBatchedStrided<<< grid, block >>>( + problem_size, + alpha, + matrix_A.device_ref(), + matrix_A.capacity(), + matrix_B.device_ref(), + matrix_B.capacity(), + matrix_C_computed.device_ref(), + matrix_C_computed.capacity() + ); + } + } + else + { + cutlass::gemm::kernel::GemvBatchedStrided<<< grid, block >>>( + problem_size, + alpha, + beta, + matrix_A.device_ref(), + matrix_A.capacity(), + matrix_B.device_ref(), + matrix_B.capacity(), + matrix_C_computed.device_ref(), + matrix_C_computed.capacity(), + matrix_C_computed.device_ref(), + matrix_C_computed.capacity() + ); + } + + if (iter == 0) + { + result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result); + } + } + + if (perf_test) + { + result = cudaEventRecord(end_event); + EXPECT_EQ(result, cudaSuccess); + } + + result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result); + + if (perf_test) + { + float ms; + result = cudaEventElapsedTime(&ms, start_event, end_event); + EXPECT_EQ(result, cudaSuccess); + + double flops = (double(problem_size.m()) * + double(problem_size.n()) * + double(problem_size.k()) * + double(problem_size.batch()) * 2); // 2 for MAC + + double read_bytes = double(problem_size.batch()) * 
(sizeof(ElementA)*double(problem_size.m())*double(problem_size.k()) + + sizeof(ElementB)*double(problem_size.k())*double(problem_size.n())); + + double write_bytes = double(problem_size.batch()) * (sizeof(ElementCD)*double(problem_size.m())*double(problem_size.n())); + + double avg_runtime = double(ms) / perf_test_iter; + double gflops_per_sec = flops / 1.0e6 / avg_runtime; + double read_bandwidth = read_bytes / 1.0e6 / avg_runtime; + double write_bandwidth = write_bytes / 1.0e6 / avg_runtime; + + std::cout << "\n\nProblem size: " + << problem_size.m() + << " x " << problem_size.n() + << " x " << problem_size.k() + << " x " << problem_size.batch() + << std::endl; + + std::cout << " GFLOPs: " << gflops_per_sec << std::endl; + std::cout << "BW (R/W): " << read_bandwidth << " / " << write_bandwidth << " GB/sec" << std::endl; + std::cout << " Runtime: " << avg_runtime << " ms" << std::endl; + } + else + { + matrix_C_computed.sync_host(); + + // Compute the batched gemms + for (int b = 0; b < problem_size.batch(); b++) + { + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + problem_size.mnk(), alpha, + matrix_A.host_ref(b * matrix_A.capacity()), + matrix_B.host_ref(b * matrix_B.capacity()), beta, + matrix_C_reference.host_ref(b * matrix_C_computed.capacity())); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed.host_view(b * matrix_C_computed.capacity()), + matrix_C_reference.host_view(b * matrix_C_reference.capacity())); + + EXPECT_TRUE(passed) + //<< "A:\n" << matrix_A.host_view() << "\n" + //<< "B:\n" << matrix_B.host_view() << "\n" + << "Batch: " << b << "\n" + << "Reference:\n" + << matrix_C_reference.host_view(b * matrix_C_reference.capacity()) + << "\n" + << "Computed:\n" + << matrix_C_computed.host_view(b * matrix_C_computed.capacity()) + << "\n"; + } + } +} + +template +void batched_gemv_kernel_perf_test(cutlass::gemm::BatchedGemmCoord problem_size, + ElementCD_ alpha = ElementCD_(1), + ElementCD_ beta = 
ElementCD_(0), + int iter = 50) +{ + batched_gemv_kernel_test(problem_size, alpha, beta, true, iter); +} + +} // namespace threadblock +} // namespace kernel +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..cdd71bc18223fcc7be8ddfefe36654de0c73fe7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/CMakeLists.txt @@ -0,0 +1,39 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_thread + gemm_sm50.cu + gemm_sm60.cu + gemm_sm61.cu + testbed.h + ) + +add_subdirectory(host) + +add_dependencies(test_unit_gemm_thread test_unit_gemm_thread_host) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..1ac6ea57a29f62eded0a00f2e5b546070eff2bc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm50.cu @@ -0,0 +1,175 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/thread/mma.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Sgemm_thread, col_row_3x4x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<3, 4, 2>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_row_4x4x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<4, 4, 2>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, row_col_4x4x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<4, 4, 2>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_row_4x5x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<4, 5, 3>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_row) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, row_col) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_col) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + 
+TEST(SM50_Sgemm_thread, row_row) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Dgemm_thread, col_row) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Dgemm_thread, row_col) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::ColumnMajor + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm60.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm60.cu new file mode 100644 index 0000000000000000000000000000000000000000..23099b27f217a4085857d85133df5ad0d10da271 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm60.cu @@ -0,0 +1,499 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/thread/mma.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Compute capability SM60 +// + +TEST(SM60_Hgemm_thread, col_row_col_1x1x16) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<1, 1, 16>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_row_1x1x16) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<1, 1, 16>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_col_1x3x8) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<1, 3, 8>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_row_row_7x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_row_7x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_row_7x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + 
+TEST(SM60_Hgemm_thread, col_col_row_7x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_row_row_7x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_row_7x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_row_7x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_row_7x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<7, 8, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_row_col_16x3x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_col_16x3x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, 
col_row_col_16x3x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_col_16x3x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_row_col_16x3x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_col_16x3x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_col_16x3x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_col_16x3x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 3, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_row_row_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, 
row_row_col_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_row_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +}TEST(SM60_Hgemm_thread, row_col_col_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_row_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_col_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_row_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_col_16x8x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 3>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, 
row_row_row_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_row_col_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_row_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, row_col_col_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_row_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_row_col_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_row_16x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_Hgemm_thread, col_col_col_16x8x4) 
{ + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<16, 8, 4>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm61.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm61.cu new file mode 100644 index 0000000000000000000000000000000000000000..68f91108d6af5a583fcfce5858540ccf37e414ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/gemm_sm61.cu @@ -0,0 +1,87 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/thread/mma.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Compute capability SM61 +// + +TEST(SM61_Igemm_thread, col_row_1x1x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<1, 1, 4>, + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + int32_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM61_Igemm_thread, col_row_2x3x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 3, 4>, + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + int32_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM61_Igemm_thread, col_row_8x8x4) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 4>, + int8_t, + cutlass::layout::RowMajor, + int8_t, + cutlass::layout::ColumnMajor, + int32_t, + cutlass::layout::ColumnMajor + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..affade4c64e6fbc3f75862364efc512fdf1ec41a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_thread_host + gemm_sm60_host.cu + testbed_host.h + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/gemm_sm60_host.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/gemm_sm60_host.cu new file mode 100644 index 0000000000000000000000000000000000000000..5b1b5da2f33637f4aedf8dbbf07e439b31399ab1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/gemm_sm60_host.cu @@ -0,0 +1,176 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/thread/mma.h" + +#include "testbed_host.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Compute capability SM60 +// + +TEST(SM60_host_Hgemm_thread, col_row_col_1x1x16) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<1, 1, 16>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, row_col_row_1x1x16) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<1, 1, 16>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, row_row_row_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, row_row_col_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, row_col_row_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, row_col_col_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + 
cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, col_row_row_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, col_row_col_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, col_col_row_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor + >().run(); +} + +TEST(SM60_host_Hgemm_thread, col_col_col_2x2x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<2, 2, 2>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/testbed_host.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/testbed_host.h new file mode 100644 index 0000000000000000000000000000000000000000..bd7894792b6d9c097a94bbe4443014055890e09d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/host/testbed_host.h @@ -0,0 +1,232 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#pragma once + +#include "cutlass/gemm/thread/mma.h" +#include "cutlass/layout/vector.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +namespace test { +namespace gemm { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Thread-level matrix multiply-accumulate +template +void kernel( + typename Mma::ElementC *D, + typename Mma::ElementA const *A, + typename Mma::ElementB const *B, + typename Mma::ElementC const *C) { + + auto ptr_D = reinterpret_cast *>(D); + auto ptr_A = reinterpret_cast const *>(A); + auto ptr_B = reinterpret_cast const *>(B); + auto ptr_C = reinterpret_cast const *>(C); + + Mma mma; + + auto a = *ptr_A; + auto b = *ptr_B; + auto c = *ptr_C; + + using Btype = typename Mma::ElementB; + cutlass::Array d; + + mma(d, a, b, c); + + *ptr_D = d; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC +> +struct Testbed { + + /// Thread-level matrix multiply-accumulate operator + using Mma = cutlass::gemm::thread::Mma< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC + >; + + // + // Data members + // + + 
cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed() { + + tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK), false); + tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN), false); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + /// Runs the test + bool run() { + + // + // initialize device memory + // + + cutlass::reference::host::detail::RandomUniformFunc< ElementA > tfill_rand_func( + 0, // seed + 10, // max + 0, // min + 0); // bits after decimal + + cutlass::reference::host::detail::TensorFillRandomUniformFunc< ElementA, LayoutA > tfill_rand( + tensor_A.host_view(), + tfill_rand_func); + + for (auto i=0; i< Shape::kM; i++) + for (auto j=0; j< Shape::kK; j++) + tfill_rand(cutlass::make_Coord(i,j)); + + cutlass::reference::host::BlockFillSequential( + tensor_B.host_data(), + tensor_B.capacity(), + ElementB(1), + ElementB(2) + ); + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + + // Host side call + kernel( + tensor_D_computed.host_data(), + tensor_A.host_data(), + tensor_B.host_data(), + tensor_C.host_data()); + + // + // Reference implementation + // + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + {Shape::kM, Shape::kN, Shape::kK}, + ElementC(1), + tensor_A.host_ref(), + tensor_B.host_ref(), + ElementC(0), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = 
cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed) + << "A:\n" << tensor_A.host_view() << "\n\n" + << "B:\n" << tensor_B.host_view() << "\n\n" + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + + + return passed; + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..c5ad60fea083663050b5e7d46dd46b36d02c27f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/thread/testbed.h @@ -0,0 +1,236 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#pragma once + +#include "cutlass/gemm/thread/mma.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +namespace test { +namespace gemm { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Thread-level matrix multiply-accumulate +template +__global__ void kernel( + typename Mma::ElementC *D, + typename Mma::ElementA const *A, + typename Mma::ElementB const *B, + typename Mma::ElementC const *C) { + + auto ptr_D = reinterpret_cast *>(D); + auto ptr_A = reinterpret_cast const *>(A); + auto ptr_B = reinterpret_cast const *>(B); + auto ptr_C = reinterpret_cast const *>(C); + + Mma mma; + + auto a = *ptr_A; + auto b = *ptr_B; + auto c = *ptr_C; + + cutlass::Array d; + + mma(d, a, b, c); + + *ptr_D = d; 
+} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC +> +struct Testbed { + + /// Thread-level matrix multiply-accumulate operator + using Mma = cutlass::gemm::thread::Mma< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC + >; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed() { + + tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK)); + tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + /// Runs the test + bool run() { + + // + // initialize device memory + // + + cutlass::reference::host::BlockFillSequential( + tensor_A.host_data(), + tensor_A.capacity() + ); + + cutlass::reference::host::BlockFillSequential( + tensor_B.host_data(), + tensor_B.capacity(), + ElementB(1), + ElementB(2) + ); + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + 
tensor_D_reference.host_view(), + ElementC(0) + ); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + + // launch kernel + kernel<<< dim3(1, 1), dim3(1, 1, 1) >>>( + tensor_D_computed.device_data(), + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data()); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) { + return false; + } + + tensor_D_computed.sync_host(); + + // + // Reference implementation + // + + //tensor_D_reference.fill(tensor_C.host_view()); + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + {Shape::kM, Shape::kN, Shape::kK}, + ElementC(1), + tensor_A.host_ref(), + tensor_B.host_ref(), + ElementC(0), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed) + << "A:\n" << tensor_A.host_view() << "\n\n" + << "B:\n" << tensor_B.host_view() << "\n\n" + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..170d0d45d9c4db0d00a8801e312e1f2e14c5b9f6 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/CMakeLists.txt @@ -0,0 +1,46 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_threadblock + mma_multistage.cu + mma_multistage_sparse.cu + mma_pipelined_sm80.cu + mma_multistage_slicedk.cu + mma_pipelined_slicedk.cu + mma_pipelined_wmma_sm70.cu + mma_pipelined_wmma_sm75.cu + mma_singlestage_wmma_sm70.cu + mma_singlestage_wmma_sm75.cu + mma_pipelined_sm70.cu + mma_pipelined_sm75.cu + mma_pipelined_simt.cu + mma_planar_complex_sm80.cu + +) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/batched_gemv.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/batched_gemv.cu new file mode 100644 index 0000000000000000000000000000000000000000..28b49f40362b450ebda1ddedc783be010b8a574e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/batched_gemv.cu @@ -0,0 +1,646 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for threadblock level GEMV +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/numeric_types.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "cutlass/gemm/threadblock/gemv.h" +#include "cutlass/gemm/threadblock/default_gemv_core.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void batched_gemv_threadblock_test_kernel( + cutlass::gemm::GemmCoord problem_size, + LongIndex stride_a, + LongIndex stride_b, + LongIndex 
stride_c, + RefA ref_A, + RefB ref_B, + RefC ref_C + ) { + + typename Gemv::IteratorA::TensorCoord threadblock_offset_A(0, 0); + typename Gemv::IteratorB::TensorCoord threadblock_offset_B(0, 0); + typename Gemv::IteratorB::TensorCoord threadblock_offset_C(0, 0); + + // Move to the right batches for these threads + ref_A.add_pointer_offset(threadIdx.y * stride_a); + ref_B.add_pointer_offset(threadIdx.y * stride_b); + ref_C.add_pointer_offset(threadIdx.y * stride_c); + + // Construct iterators to A and B operands + typename Gemv::IteratorA::Params params_A(ref_A.layout()); + typename Gemv::IteratorA iterator_A(params_A, ref_A.data(), { problem_size.m(), problem_size.k() }, 0, threadblock_offset_A); + typename Gemv::IteratorB::Params params_B(ref_B.layout()); + typename Gemv::IteratorB iterator_B(params_B, ref_B.data(), { problem_size.k(), problem_size.n() }, threadIdx.x, threadblock_offset_B); + + Gemv gemv; + + typename Gemv::FragmentC accum; + accum.clear(); + + // Compute threadblock-scoped matrix multiply-add + gemv(problem_size, accum, iterator_A, iterator_B, accum); + + // IteratorC is PitchLinear<> assumes n() contiguous + typename Gemv::IteratorC::Params params_C(ref_C.layout()); + typename Gemv::IteratorC iterator_C(params_C, ref_C.data(), { problem_size.m(), problem_size.n() }, threadIdx.x, threadblock_offset_C); + iterator_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +void batched_gemv_threadblock_test(cutlass::gemm::GemmCoord problem_size, int num_batch) +{ + using Shape = Shape_; + using ElementA = ElementAB_; + using LayoutA = LayoutA_; + using ElementB = ElementAB_; + using LayoutB = LayoutB_; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using ThreadShape = cutlass::gemm::GemmShape<1, THREAD_N, THREAD_K>; + + using Core = typename cutlass::gemm::threadblock::DefaultGemvCore< + Shape, + ThreadShape, + ElementA, + LayoutA, + ElementB, + LayoutB, + 
ElementC, + LayoutC + >; + + if (DEBUG) + { + num_batch = 1; + } + + using Mma = cutlass::gemm::threadblock::Gemv; + + // Create host tensors that will be the backing store for the batches + // Note that no device memory is initially allocated + cutlass::HostTensor matrix_A({problem_size.m(), problem_size.k()}, false); + cutlass::HostTensor matrix_B({problem_size.k(), problem_size.n()}, false); + cutlass::HostTensor matrix_C_computed({problem_size.m(), problem_size.n()}, false); + cutlass::HostTensor matrix_C_reference({problem_size.m(), problem_size.n()}, false); + + // Reserve memory for the batch of tensors + matrix_A.reserve(problem_size.m()*problem_size.k()*num_batch); + matrix_B.reserve(problem_size.n()*problem_size.k()*num_batch); + matrix_C_computed.reserve(problem_size.m()*problem_size.n()*num_batch); + matrix_C_reference.reserve(problem_size.m()*problem_size.n()*num_batch, false); + + // Fill eatch tensor batch + const int seed = 6834; + for (int b = 0; b < num_batch; b++) + { + if(DEBUG) + { + cutlass::reference::host::BlockFillSequential( + matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity()); + cutlass::reference::host::BlockFillSequential( + matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity()); + } + else + { + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(b*matrix_A.capacity()), + seed + 1660, + 8, + -8, + 0 + ); + + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(b*matrix_B.capacity()), + seed + 1880, + 8, + -8, + 0 + ); + } + + cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity())); + cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity())); + } + + matrix_A.sync_device(); + matrix_B.sync_device(); + matrix_C_computed.sync_device(); + + dim3 grid(1, 1); // only 1 CTA is used + dim3 block(Shape::kN / THREAD_N, num_batch, 1); + + #if 0 + printf("block dim = %d x %d\n", 
block.x, block.y); + #endif + + // Some sanity checks + EXPECT_TRUE( problem_size.n() % THREAD_N == 0 ); + EXPECT_TRUE( block.x*block.y <= MAX_THREADS_PER_BLOCK ); + + test::gemm::threadblock::batched_gemv_threadblock_test_kernel<<< grid, block >>>( + problem_size, + matrix_A.capacity(), + matrix_B.capacity(), + matrix_C_computed.capacity(), + matrix_A.device_ref(), + matrix_B.device_ref(), + matrix_C_computed.device_ref() + ); + + cudaError_t result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result); + + matrix_C_computed.sync_host(); + + // Compute the batched gemms + for (int b = 0; b < num_batch; b++) + { + + cutlass::reference::host::Gemm reference_gemm; + + reference_gemm( + problem_size.mnk(), + ElementC(1), + matrix_A.host_ref(b*matrix_A.capacity()), + matrix_B.host_ref(b*matrix_B.capacity()), + ElementC(0), + matrix_C_reference.host_ref(b*matrix_C_computed.capacity()) + ); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed.host_view(b*matrix_C_computed.capacity()), + matrix_C_reference.host_view(b*matrix_C_reference.capacity())); + + EXPECT_TRUE(passed) + //<< "A:\n" << matrix_A.host_view() << "\n" + //<< "B:\n" << matrix_B.host_view() << "\n" + << "Batch: " << b << "\n" + << "Reference:\n" << matrix_C_reference.host_view(b*matrix_C_reference.capacity()) << "\n" + << "Computed:\n" << matrix_C_computed.host_view(b*matrix_C_computed.capacity()) << "\n"; + } +} + +} // namespace threadblock +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// A: ColumnMajor +// B: RowMajor +// C: ColumnMajor + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp32_fp32_2N_2K) { + + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 2; + + using Shape = cutlass::gemm::GemmShape<1, 
64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 5x1x128x128_crc_fp32_fp32_4N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 128, 128); + const int num_batch = 5; + const int THREAD_N = 4; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_fp32_fp32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp16_fp32_2N_2K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 2; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp16_fp32_2N_8K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 8; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_fp16_fp32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_i8_i32_2N_4K) { + 
using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_i8_i32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +// A: RowMajor +// B: ColumnMajor +// C: RowMajor + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp32_fp32_2N_2K) { + + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 2; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 5x1x128x128_rcr_fp32_fp32_4N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 128, 128); + const int num_batch = 5; + const int THREAD_N = 4; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_fp32_fp32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp16_fp32_2N_2K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + 
const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 2; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp16_fp32_2N_8K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 8; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_fp16_fp32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_i8_i32_2N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_i8_i32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +// A: RowMajor +// B: ColumnMajor +// C: ColumnMajor + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp32_fp32_2N_2K) { + + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 2; + + using Shape = 
cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 5x1x128x128_rcc_fp32_fp32_4N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 128, 128); + const int num_batch = 5; + const int THREAD_N = 4; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_fp32_fp32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp16_fp32_2N_2K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 2; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp16_fp32_2N_8K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 8; + + using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_fp16_fp32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 
4x1x64x64_rcc_i8_i32_2N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 64, 64); + const int num_batch = 4; + const int THREAD_N = 2; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} + +TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_i8_i32_1N_4K) { + using namespace test::gemm::threadblock; + cutlass::gemm::GemmCoord problem_size(1, 17, 64); + const int num_batch = 16; + const int THREAD_N = 1; + const int THREAD_K = 4; + + using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; + batched_gemv_threadblock_test(problem_size, num_batch); +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/epilogue_workspace.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/epilogue_workspace.cu new file mode 100644 index 0000000000000000000000000000000000000000..7e0872314f63510e3c3dae2c750dd56675ab3ebe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/epilogue_workspace.cu @@ -0,0 +1,130 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/epilogue/epilogue_workspace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel computes accumulator data and stores it out +template +__global__ void kernel_epilogue_workspace(typename Epilogue::Params params) { + + __shared__ typename Epilogue::SharedStorage shared_storage; + + int warp_id = threadIdx.y; + int lane_id = threadIdx.x; + + Epilogue epilogue(params, shared_storage, warp_id, lane_id); + + // + // Initialize accumulator tile + // + typename Epilogue::FragmentC accum; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Epilogue::FragmentC::kElements; ++i) { + accum[i] = Element(warp_id * blockDim.x + lane_id); + } + + // + // Efficient epilogue + // + + cutlass::GemmCoord tb_tile_coord{blockIdx.x, blockIdx.y, 0}; + + cutlass::GemmCoord problem_size = + tb_tile_coord * + cutlass::GemmCoord{Epilogue::Shape::kM, Epilogue::Shape::kN, 1}; + + // Store accumulators + epilogue( + problem_size, + tb_tile_coord, + accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_epilogue_workspace, tensor_op_128x128_64x64) { + + // + // Define an instance of the epilogue and see if it works + // + static int const kWarpCount = 4; + static int const kWarpSize = 32; + + using Shape = cutlass::MatrixShape<128, 128>; + using FragmentC = cutlass::Array; + + using Epilogue = cutlass::gemm::threadblock::EpilogueWorkspace< + Shape, + kWarpCount, + FragmentC + >; + + typename 
Epilogue::Params params( + + ); + + // Launch the kernel + dim3 grid(1,1); + dim3 block(kWarpSize, kWarpCount); + + test::gemm::threadblock::kernel_epilogue_workspace<<< grid, block >>>( + params + ); + + cudaError_t result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) << "Kernel launch error - " << cudaGetErrorString(result); + + // + // + // +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage.cu new file mode 100644 index 0000000000000000000000000000000000000000..1313b1abaab1eecbebe3e783d0318e07412c02d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage.cu @@ -0,0 +1,3833 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Unit tests for threadblock-level GEMM +*/ + +#include "mma_multistage_testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x64x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, 
cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x64x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x128x64_32x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using 
InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x128x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_256x256x384_128x128x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + 
using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_512x256x384_256x128x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + 
test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x64x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x64x32_64x32x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int 
const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x128x32_32x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_512x256x768_256x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x64x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x64x32_64x32x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x128x32_32x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord 
problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x128x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_gemm_threadblock_congruous, + multicta_256x256x192_128x128x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_512x256x384_256x128x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x64x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x64x16_64x32x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 
16>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x128x16_32x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x128x16_64x64x16_16x8x8_4stage) { + using ElementA = 
cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_256x256x192_128x128x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 
grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_512x256x384_256x128x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 
16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x64_32x32x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = 
cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x64_32x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + 
.run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x384_128x128x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x768_256x128x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord 
problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x32_32x32x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x32_64x32x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x32_32x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, 
LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x768_256x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using 
LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x32_32x32x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + 
test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x32_64x32x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x32_32x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 
0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x192_128x128x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + 
using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x192_256x128x32_64x64x32_16x8x8_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, 
block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x16_32x32x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x16_64x32x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x16_32x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord 
problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x192_128x128x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x192_256x128x16_64x64x16_16x8x8_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x128_64x64x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x128_32x32x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = 
cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x128_64x32x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x128_32x64x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; 
+ using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x128_64x64x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + 
problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x768_128x128x128_64x64x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x768_256x128x128_64x64x128_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x64_64x64x64_16x8x32_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x64_32x32x64_16x8x32_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x64_64x32x64_16x8x32_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x64_32x64x64_16x8x32_4stage) { + using ElementA = 
int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x64_64x64x64_16x8x32_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + 
test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages 
= 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x256_64x64x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x256_32x32x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x256_64x32x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x256x256_32x64x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x256x256_64x64x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x1536_128x256x256_64x64x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x1536_256x256x256_64x64x256_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + 
cutlass::gemm::GemmCoord problem_size(512, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x128_64x64x128_16x8x64_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x128_32x32x128_16x8x64_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x128_64x32x128_16x8x64_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, 
LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x256x128_32x64x128_16x8x64_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x256x128_64x64x128_16x8x64_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = 
cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x1536_128x256x128_64x64x128_16x8x64_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x1536_256x256x128_64x64x128_16x8x64_4stage) { + using ElementA = 
cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x1024_64x64x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 1024>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 
block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x1024_32x32x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 1024>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x1024_64x32x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 1024>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + 
float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x1024x1024_32x64x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 1024>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x1024x1024_64x64x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = 
cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 1024>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x6144_128x1024x1024_64x64x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 6144); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 1024>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + 
problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x6144_256x1024x1024_64x64x1024_16x8x256_3stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 6144); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 1024>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x512_64x64x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the 
MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x64x512_32x32x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x64x512_64x32x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_64x128x512_32x64x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + tensor_op_128x128x512_64x64x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 4096); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x6144_128x128x512_64x64x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 6144); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x6144_256x128x512_64x64x512_16x8x256_4stage) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 6144); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST(SM80_gemm_threadblock_congruous, + tensor_op_64x64x16_32x64x16_8x8x4_3stage) { + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 16); 
+ + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 2, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + tensor_op_128x128x16_32x64x16_8x8x4_3stage) { + using ElementA = double; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = double; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + tensor_op_64x128x64_32x64x64_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using 
ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + tensor_op_128x128x64_64x64x64_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + 
test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + multicta_256x256x384_128x128x64_64x64x64_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + multicta_512x256x384_256x128x64_64x64x64_16x8x32_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; 
+ + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + tensor_op_64x128x128_32x64x128_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + tensor_op_128x128x128_64x64x128_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = 
cutlass::int4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + multicta_256x256x768_128x128x128_64x64x128_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + 
test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_interleaved, + multicta_512x256x1536_256x128x128_64x64x128_16x8x64_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise_f64, + tensor_op_32x32x16_16x16x16_8x8x4_4stage) { + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + int const Stages = 
4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +TEST(SM80_gemm_threadblock_crosswise_f64, + tensor_op_64x64x16_32x32x16_8x8x4_4stage) { + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +TEST(SM80_gemm_threadblock_crosswise_f64, + tensor_op_64x128x16_32x64x16_8x8x4_4stage) { + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + int const 
Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +TEST(SM80_gemm_threadblock_crosswise_f64, + tensor_op_128x64x16_64x32x16_8x8x4_4stage) { + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +TEST(SM80_gemm_threadblock_crosswise_f64, + tensor_op_128x128x16_32x64x16_8x8x4_3stage) { + using ElementA = double; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = double; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = double; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + 
int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_slicedk.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_slicedk.cu new file mode 100644 index 0000000000000000000000000000000000000000..7418732dd46e1b69c00abaafbcb856db27a2a18f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_slicedk.cu @@ -0,0 +1,111 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Unit tests for CTA-level GEMM specifically for sliced-k kernels (SM_61 and SM_75) +*/ + +#include "mma_multistage_testbed_slicedk.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Tensor Op GEMM for SM_80 +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous_sliced, tensor_op_128x64x256_tb128x64x64_warp64x64x32_16x8x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; 
+ + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM80_gemm_threadblock_crosswise_sliced, tensor_op_128x64x256_tb128x64x64_warp64x64x32_16x8x16) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse.cu new file mode 100644 index 0000000000000000000000000000000000000000..4bb98cd05d848df5a12c3b901eb74a780f93aa18 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse.cu @@ -0,0 +1,2703 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit tests for threadblock-level GEMM +*/ + +#include "mma_multistage_sparse_testbed.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x64x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x64x64_32x32x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 
32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x64x64_64x32x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x128x64_32x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = 
cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x128x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + 
problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float 
beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x64_32x32x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = 
cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x64_64x32x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), 
problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x64_32x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the 
MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC 
= float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x64x128_64x64x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, 
block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x64x128_64x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x128x128_32x64x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x128x128_64x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + multicta_256x256x768_128x128x128_64x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x128_64x64x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x128_32x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x128_64x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x128_32x64x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x128_64x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + 
cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x768_128x128x128_64x32x128_16x8x32_3stage) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 768); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x64x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x64x32_32x32x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x64x32_64x32x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x128x32_32x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + multicta_512x256x384_256x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + 
using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x32_32x32x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x32_64x32x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x32_32x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_512x256x384_256x128x32_64x64x32_16x8x16_4stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x64x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x64x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_64x128x64_32x64x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + tensor_op_128x128x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_congruous, + multicta_256x256x384_128x128x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x64_64x64x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x64_32x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x64_32x64x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x384_128x128x64_64x32x64_16x8x16_3stage) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x128_64x64x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x128_32x32x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x128_64x32x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x128_32x64x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord 
problem_size(64, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x128_64x64x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x1536_128x128x128_64x64x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_512x256x1536_256x128x128_64x64x128_16x8x64_4stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x256_64x64x256_16x8x64_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x256_32x32x256_16x8x64_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; 
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x256_64x32x256_16x8x64_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x256_32x64x256_16x8x64_3stage) { + using 
ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x256_64x32x256_16x8x64_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 1024); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 
block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x1536_128x128x256_64x32x256_16x8x64_3stage) { + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 1536); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x256_64x64x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = 
cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x256_32x32x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x256_64x32x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = 
cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x256_32x64x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 
block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x256_64x64x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x3072_128x128x256_64x64x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 3072); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using 
InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_512x256x3072_256x128x256_64x64x256_16x8x128_4stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 3072); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 256>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 4; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x512_64x64x512_16x8x128_3stage) { + using ElementA = cutlass::int4b_t; 
+ using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x64x512_32x32x512_16x8x128_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); 
+ dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x64x512_64x32x512_16x8x128_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_64x128x512_32x64x512_16x8x128_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>; + using InstructionShape = 
cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + tensor_op_128x128x512_64x32x512_16x8x128_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 2048); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_sparse_gemm_threadblock_crosswise, + multicta_256x256x3072_128x128x512_64x32x512_16x8x128_3stage) { + using ElementA = cutlass::int4b_t; + using LayoutA 
= cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 3072); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + + float alpha = 1.f; + float beta = 0.0f; + int const Stages = 3; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, + Stages>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::SparseTestbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..44ef05305e4a651f98c2b216eff16532ba2519c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h @@ -0,0 +1,435 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit testbed for kernel-level GEMM +*/ + +#pragma once + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" +#include "cutlass/core_io.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/host_reorder.h" +#include "cutlass/util/host_uncompress.h" + +namespace test { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template +__global__ void kernel_multistage_mma_sparse(cutlass::gemm::GemmCoord problem_size, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Mma::ElementC *ptr_C, + typename Mma::LayoutC::Stride::Index ldc, + typename Mma::IteratorE::Params params_E, + typename Mma::IteratorE::TensorRef ref_E) { + // Shared storage needed by threadblock-scoped matrix multiply- + // Dynamic shared memory base pointer + extern __shared__ int GemmSharedStorageBase[]; + + // Declare pointer to dynamic shared memory. 
+ typename Mma::SharedStorage *shared_storage = + reinterpret_cast(GemmSharedStorageBase); + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), + 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k() / Mma::kSparse}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN}; + + cutlass::MatrixCoord tb_offset_E{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k() / Mma::kSparse}; + + // Compute position within threadblock + int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A(params_A, ref_A.data(), + {problem_size.m(), problem_size.k() / Mma::kSparse}, + tb_thread_id, tb_offset_A); + + typename Mma::IteratorB iterator_B(params_B, ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + typename Mma::IteratorE iterator_E( + params_E, ref_E.data(), + {problem_size.m(), + problem_size.k() / Mma::kSparse / Mma::kElementsPerElementE}, + tb_thread_id, tb_offset_E); + + int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0); + + // Construct thread-scoped matrix multiply + Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x); + + typename Mma::FragmentC accum; + + accum.clear(); + + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, iterator_E, accum); + + // Output results + typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, threadIdx.x); + + iterator_C.add_tile_offset( + {(tb_tile_offset.m() * Mma::WarpCount::kM) + + (warp_id % Mma::WarpCount::kM), + (tb_tile_offset.n() * Mma::WarpCount::kN) + + (warp_id / Mma::WarpCount::kM)}); + + iterator_C.store(accum); +} + +//////////////////////////////////////////////////////////////////////////////// + +/// 
Structure to compute the matrix product +template < + /// Threadblock-level matrix multiply-accumulate + typename MmaCore_> +struct SparseTestbed { + /// Threadblock-level GEMM implementation + using MmaCore = MmaCore_; + using ThreadblockShape = typename MmaCore::Shape; + using WarpShape = typename MmaCore::WarpShape; + using InstructionShape = typename MmaCore::InstructionShape; + using ElementA = typename MmaCore::ElementA; + using LayoutA = typename MmaCore::LayoutA; + using ElementB = typename MmaCore::ElementB; + using LayoutB = typename MmaCore::LayoutB; + using ElementC = typename MmaCore::ElementC; + using LayoutC = typename MmaCore::LayoutC; + using ElementE = typename MmaCore::ElementE; + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using ThreadMapE = typename MmaCore::IteratorThreadMapE; + using AccessTypeA = cutlass::Array; + using AccessTypeB = cutlass::Array; + using AccessTypeE = cutlass::Array; + static int const Stages = MmaCore::kStages; + static cutlass::arch::CacheOperation::Kind const CacheOpA = + MmaCore::kCacheOpA; + static cutlass::arch::CacheOperation::Kind const CacheOpB = + MmaCore::kCacheOpB; + static cutlass::arch::CacheOperation::Kind const CacheOpE = + MmaCore::kCacheOpE; + + static int const Sparse = MmaCore::kSparse; + static int const MetaSizeInBits = MmaCore::kMetaSizeInBits; + static int const MaxID2 = MmaCore::kMaxID2; + + using LayoutE = cutlass::layout::RowMajor; + using ReorderedLayoutE = typename MmaCore::GmemLayoutE; + + static int const ElementsPerElementE = MmaCore::kElementsPerElementE; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + 
cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define iterators over tiles from the E operand + using IteratorE = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementE, ReorderedLayoutE, 1, ThreadMapE, AccessTypeE>; + + // Define the threadblock-scoped pipelined matrix multiply + using Mma = cutlass::gemm::threadblock::SparseMmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + CacheOpA, IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC, + LayoutC, IteratorE, typename MmaCore::SmemIteratorE, CacheOpE, + typename MmaCore::MmaPolicy, Stages>; + + // + // Data members + // + + cutlass::HostTensor matrix_A; + cutlass::HostTensor matrix_A_uncompressed; + cutlass::HostTensor matrix_B; + cutlass::HostTensor matrix_C_computed; + cutlass::HostTensor matrix_C_reference; + cutlass::HostTensor matrix_E; + cutlass::HostTensor matrix_E_reordered; + + cutlass::gemm::GemmCoord problem_size; + float alpha, beta; + + // + // Methods + // + + /// Allocates workspace in device memory + SparseTestbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0)) + : problem_size(m, n, k), alpha(alpha_), beta(beta_) { + matrix_A.reset(cutlass::make_Coord(m, k / Sparse)); + matrix_A_uncompressed.reset(cutlass::make_Coord(m, k)); + matrix_B.reset(cutlass::make_Coord(k, n)); + matrix_C_computed.reset(cutlass::make_Coord(m, n)); + matrix_C_reference.reset(cutlass::make_Coord(m, n), false); + matrix_E.reset(cutlass::make_Coord(m, k / Sparse / ElementsPerElementE)); + matrix_E_reordered.reset( + cutlass::make_Coord(m, k / Sparse / ElementsPerElementE)); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + // + // Determine SMEM requirements and waive if not satisfied + // + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + return true; + } + + /// Runs the test + bool run( + dim3 grid, dim3 block, + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) { + + // Waive the test + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), + matrix_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == 
cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), + matrix_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill(matrix_C_computed.host_view()); + + cutlass::reference::host::TensorFill(matrix_C_reference.host_view()); + + if (init_E == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomSparseMeta( + matrix_E.host_view(), seed, MetaSizeInBits); + } else if (init_E == cutlass::Distribution::Identity) { + uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444; + cutlass::reference::host::TensorFill(matrix_E.host_view(), + (ElementE)(content)); + } else { + return false; + } + + cutlass::reorder_meta(matrix_E_reordered.host_ref(), matrix_E.host_ref(), + {problem_size.m(), problem_size.n(), + problem_size.k() / Sparse / ElementsPerElementE}); + + matrix_A.sync_device(); + matrix_B.sync_device(); + matrix_C_computed.sync_device(); + matrix_E_reordered.sync_device(); + + typename IteratorA::Params params_A(matrix_A.layout()); + typename IteratorB::Params params_B(matrix_B.layout()); + typename IteratorE::Params params_E(matrix_E_reordered.layout()); + + cudaError_t result; + + int smem_size = int(sizeof(typename Mma::SharedStorage)); + if (smem_size >= (48 << 10)) { + result = cudaFuncSetAttribute( + test::gemm::threadblock::kernel_multistage_mma_sparse, + cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); + + if (result != cudaSuccess) { + return true; + } + + result = cudaFuncSetAttribute( + test::gemm::threadblock::kernel_multistage_mma_sparse, + cudaFuncAttributePreferredSharedMemoryCarveout, 100); + + if (result != cudaSuccess) { + return true; + } + } + + test::gemm::threadblock::kernel_multistage_mma_sparse + <<>>( + problem_size, params_A, matrix_A.device_ref(), params_B, + matrix_B.device_ref(), 
matrix_C_computed.device_data(), + matrix_C_computed.layout().stride(0), params_E, + matrix_E_reordered.device_ref()); + + // + // Check error code + // + + result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) + << " kernel error: " << cudaGetErrorString(result); + + matrix_C_computed.sync_host(); + + cutlass::uncompress(matrix_A_uncompressed.host_ref(), matrix_A.host_ref(), + matrix_E.host_ref(), problem_size.m(), + problem_size.k()); + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm(problem_size, ElementC(alpha), + matrix_A_uncompressed.host_view(), matrix_B.host_view(), + ElementC(beta), matrix_C_reference.host_view()); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed.host_view(), matrix_C_reference.host_view()); + + EXPECT_TRUE(passed); + + if (!passed && CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + + std::cout + << __FILE__ << ":" << __LINE__ << " " + << "A:\n" << matrix_A.host_view() << "\n" + << "B:\n" << matrix_B.host_view() << "\n" + << "E:\n" << matrix_E.host_view() << "\n" + << "Reference:\n" + << matrix_C_reference.host_view() << "\n" + << "Computed:\n" + << matrix_C_computed.host_view() << "\n"; + } + + EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_reference.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_computed.host_view()), 0); + + return passed; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..bda862c828a589269fdbccc25dcae1356ffcea25 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_testbed.h @@ -0,0 +1,372 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit testbed for kernel-level GEMM +*/ + +#pragma once + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" +#include "cutlass/core_io.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" +#include "cutlass/util/distribution.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/tensor_view_io.h" + +namespace test { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template +__global__ void kernel_multistage_mma(cutlass::gemm::GemmCoord problem_size, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Mma::ElementC *ptr_C, + typename Mma::LayoutC::Stride::Index ldc) { + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + + // Dynamic shared memory base pointer + extern __shared__ int GemmSharedStorageBase[]; + + // Declare pointer to dynamic shared memory. 
+ typename Mma::SharedStorage *shared_storage = + reinterpret_cast(GemmSharedStorageBase); + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), + 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k()}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN}; + + // Compute position within threadblock + int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A(params_A, ref_A.data(), + {problem_size.m(), problem_size.k()}, + tb_thread_id, tb_offset_A); + + typename Mma::IteratorB iterator_B(params_B, ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0); + + // Construct thread-scoped matrix multiply + Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x); + + typename Mma::FragmentC accum; + + accum.clear(); + + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + + // Output results + typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, threadIdx.x); + + iterator_C.add_tile_offset( + {(tb_tile_offset.m() * Mma::WarpCount::kM) + + (warp_id % Mma::WarpCount::kM), + (tb_tile_offset.n() * Mma::WarpCount::kN) + + (warp_id / Mma::WarpCount::kM)}); + + iterator_C.store(accum); +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Threadblock-level matrix multiply-accumulate + typename MmaCore_> +struct Testbed { + /// Threadblock-level GEMM implementation + using MmaCore = MmaCore_; + using ThreadblockShape = typename MmaCore::Shape; + using WarpShape = typename MmaCore::WarpShape; + using 
InstructionShape = typename MmaCore::InstructionShape; + using ElementA = typename MmaCore::ElementA; + using LayoutA = typename MmaCore::LayoutA; + using ElementB = typename MmaCore::ElementB; + using LayoutB = typename MmaCore::LayoutB; + using ElementC = typename MmaCore::ElementC; + using LayoutC = typename MmaCore::LayoutC; + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeA = cutlass::Array; + using AccessTypeB = cutlass::Array; + static int const Stages = MmaCore::kStages; + static cutlass::arch::CacheOperation::Kind const CacheOpA = + MmaCore::kCacheOpA; + static cutlass::arch::CacheOperation::Kind const CacheOpB = + MmaCore::kCacheOpB; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped pipelined matrix multiply + using Mma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + CacheOpA, IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC, + LayoutC, typename MmaCore::MmaPolicy, Stages>; + + // + // Data members + // + + cutlass::HostTensor matrix_A; + cutlass::HostTensor matrix_B; + cutlass::HostTensor matrix_C_computed; + cutlass::HostTensor matrix_C_reference; + + cutlass::gemm::GemmCoord problem_size; + float alpha, beta; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0)) + : problem_size(m, n, k), alpha(alpha_), beta(beta_) { + matrix_A.reset(cutlass::make_Coord(m, k)); + 
matrix_B.reset(cutlass::make_Coord(k, n)); + matrix_C_computed.reset(cutlass::make_Coord(m, n)); + matrix_C_reference.reset(cutlass::make_Coord(m, n), false); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + + // + // Determine SMEM requirements and waive if not satisfied + // + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + return true; + } + + /// Runs the test + bool run( + dim3 grid, dim3 block, + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), + matrix_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t 
seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), + matrix_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill(matrix_C_computed.host_view()); + + cutlass::reference::host::TensorFill(matrix_C_reference.host_view()); + + matrix_A.sync_device(); + matrix_B.sync_device(); + matrix_C_computed.sync_device(); + + typename IteratorA::Params params_A(matrix_A.layout()); + typename IteratorB::Params params_B(matrix_B.layout()); + + cudaError_t result; + + int smem_size = int(sizeof(typename Mma::SharedStorage)); + if (smem_size >= (48 << 10)) { + result = cudaFuncSetAttribute( + test::gemm::threadblock::kernel_multistage_mma, + cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); + + if (result != cudaSuccess) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." << std::endl; + } + return true; + } + + result = cudaFuncSetAttribute( + test::gemm::threadblock::kernel_multistage_mma, + cudaFuncAttributePreferredSharedMemoryCarveout, 100); + + if (result != cudaSuccess) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; + } + return true; + } + } + + test::gemm::threadblock::kernel_multistage_mma + <<>>( + problem_size, params_A, matrix_A.device_ref(), params_B, + matrix_B.device_ref(), matrix_C_computed.device_data(), + matrix_C_computed.layout().stride(0)); + + // + // Check error code + // + + result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) + << " kernel error: " << cudaGetErrorString(result); + + matrix_C_computed.sync_host(); + + cutlass::reference::host::Gemm reference_gemm; + + reference_gemm( + problem_size, ElementC(alpha), matrix_A.host_view(), + matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view()); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed.host_view(), matrix_C_reference.host_view()); + + EXPECT_TRUE(passed); + + if (!passed && CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cout + << __FILE__ << ":" << __LINE__ << " " + << "A:\n" << matrix_A.host_view() << "\n" + << "B:\n" << matrix_B.host_view() << "\n" + << "Reference:\n" + << matrix_C_reference.host_view() << "\n" + << "Computed:\n" + << matrix_C_computed.host_view() << "\n"; + } + + EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_reference.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_computed.host_view()), 0); + + return passed; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_testbed_slicedk.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_testbed_slicedk.h new file mode 100644 index 0000000000000000000000000000000000000000..8810f5faa33a4eeb0fbf27c2b291e8a90b1938ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_testbed_slicedk.h @@ -0,0 +1,387 
@@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit testbed for kernel-level GEMM +*/ + +#pragma once + +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_fill.h" + +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" +#include "cutlass/cutlass.h" +#include "cutlass/platform/platform.h" + +namespace test { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void kernel_multistage_mma(cutlass::gemm::GemmCoord problem_size, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Mma::ElementC **ptr_C, + typename Mma::LayoutC::Stride::Index ldc) { + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + + // Dynamic shared memory base pointer + extern __shared__ int GemmSharedStorageBase[]; + + // Declare pointer to dynamic shared memory. 
+ typename Mma::SharedStorage *shared_storage = + reinterpret_cast(GemmSharedStorageBase); + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), + 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k()}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN}; + + // Compute position within threadblock + int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A(params_A, ref_A.data(), + {problem_size.m(), problem_size.k()}, + tb_thread_id, tb_offset_A); + + typename Mma::IteratorB iterator_B(params_B, ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0); + int lane_id = threadIdx.x; + + int partitionsK_idx = warp_id / (Mma::WarpCount::kM * Mma::WarpCount::kN); + + // Construct thread-scoped matrix multiply + Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x); + + typename Mma::FragmentC accum; + + accum.clear(); + + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + + // Output results + typename Mma::Operator::IteratorC iterator_C({ptr_C[partitionsK_idx], ldc}, lane_id); + + int warp_idx_mn = warp_id % (Mma::WarpCount::kM * Mma::WarpCount::kN); + iterator_C.add_tile_offset( + {(tb_tile_offset.m() * Mma::WarpCount::kM) + + (warp_idx_mn % Mma::WarpCount::kM), + (tb_tile_offset.n() * Mma::WarpCount::kN) + + (warp_idx_mn / Mma::WarpCount::kM)}); + + iterator_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Threadblock-level matrix multiply-accumulate + typename 
MmaCore_> +struct Testbed { + /// Threadblock-level GEMM implementation + using MmaCore = MmaCore_; + using ThreadblockShape = typename MmaCore::Shape; + using WarpShape = typename MmaCore::WarpShape; + using InstructionShape = typename MmaCore::InstructionShape; + using ElementA = typename MmaCore::ElementA; + using LayoutA = typename MmaCore::LayoutA; + using ElementB = typename MmaCore::ElementB; + using LayoutB = typename MmaCore::LayoutB; + using ElementC = typename MmaCore::ElementC; + using LayoutC = typename MmaCore::LayoutC; + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeA = cutlass::Array; + using AccessTypeB = cutlass::Array; + static int const Stages = MmaCore::kStages; + static cutlass::arch::CacheOperation::Kind const CacheOpA = + MmaCore::kCacheOpA; + static cutlass::arch::CacheOperation::Kind const CacheOpB = + MmaCore::kCacheOpB; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped pipelined matrix multiply + using Mma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, CacheOpA, + IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC, LayoutC, + typename MmaCore::MmaPolicy, Stages>; + + static int const kPartitionsK = MmaCore::MmaPolicy::kPartitionsK; + + // + // Data members + // + + cutlass::HostTensor matrix_A; + cutlass::HostTensor matrix_B; + cutlass::HostTensor matrix_C_computed[kPartitionsK]; + cutlass::HostTensor matrix_C_reference; + cutlass::HostTensor matrix_C_pointers; + + 
cutlass::gemm::GemmCoord problem_size; + float alpha, beta; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0)) + : problem_size(m, n, k), alpha(alpha_), beta(beta_) { + matrix_A.reset(cutlass::make_Coord(m, k)); + matrix_B.reset(cutlass::make_Coord(k, n)); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_computed[k].reset(cutlass::make_Coord(m, n)); + + matrix_C_reference.reset(cutlass::make_Coord(m, n), false); + matrix_C_pointers.reset(cutlass::Coord<1>(kPartitionsK)); + } + + /// Runs the test + bool run( + dim3 grid, dim3 block, + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), + matrix_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == 
cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), + matrix_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); + } else { + return false; + } + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + cutlass::reference::host::TensorFill(matrix_C_computed[k].host_view()); + + cutlass::reference::host::TensorFill(matrix_C_reference.host_view()); + + matrix_A.sync_device(); + matrix_B.sync_device(); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_computed[k].sync_device(); + + typename IteratorA::Params params_A(matrix_A.layout()); + typename IteratorB::Params params_B(matrix_B.layout()); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_pointers.at(cutlass::Coord<1>(k)) = matrix_C_computed[k].device_data(); + + matrix_C_pointers.sync_device(); + + cudaError_t result; + + int smem_size = int(sizeof(typename Mma::SharedStorage)); + if (smem_size >= (48 << 10)) { + result = cudaFuncSetAttribute( + test::gemm::threadblock::kernel_multistage_mma, + cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); + + EXPECT_EQ(result, cudaSuccess) + << " cudaFuncSetAttribute " + "cudaFuncAttributeMaxDynamicSharedMemorySize error: " + << cudaGetErrorString(result); + + result = cudaFuncSetAttribute( + test::gemm::threadblock::kernel_multistage_mma, + cudaFuncAttributePreferredSharedMemoryCarveout, 100); + + EXPECT_EQ(result, cudaSuccess) + << " cudaFuncSetAttribute " + "cudaFuncAttributePreferredSharedMemoryCarveout error: " + << cudaGetErrorString(result); + } + + test::gemm::threadblock::kernel_multistage_mma<<>>( + problem_size, params_A, matrix_A.device_ref(), params_B, + matrix_B.device_ref(), matrix_C_pointers.device_data(), + matrix_C_computed[0].layout().stride(0)); + + // + // Check error code + // + + result = cudaDeviceSynchronize(); + EXPECT_EQ(result, 
cudaSuccess) + << " kernel error: " << cudaGetErrorString(result); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_computed[k].sync_host(); + + // TODO: this is temporary. it will be removed after slicing can de + // reduction + // + // Reduce matrix_C_computed + // + CUTLASS_PRAGMA_UNROLL + for(int k = 1; k < kPartitionsK; k++) { + CUTLASS_PRAGMA_UNROLL + for(int m = 0; m < matrix_C_computed[0].extent().row(); m++){ + CUTLASS_PRAGMA_UNROLL + for(int n = 0; n < matrix_C_computed[0].extent().column(); n++){ + matrix_C_computed[0].at({m, n}) += matrix_C_computed[k].at({m, n}); + } + } + } + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + problem_size, ElementC(alpha), matrix_A.host_view(), + matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view()); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed[0].host_view(), matrix_C_reference.host_view()); + + EXPECT_TRUE(passed); + + if (!passed) { + std::ofstream output("mma_multistage_testbed_errors.txt"); + + output + << "A:\n" << matrix_A.host_view() << "\n" + << "B:\n" << matrix_B.host_view() << "\n" + << "Reference:\n" + << matrix_C_reference.host_view() << "\n" + << "Computed:\n" + << matrix_C_computed[0].host_view() << "\n"; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_simt.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_simt.cu new file mode 100644 index 0000000000000000000000000000000000000000..506ca0981aeb9b0fb1b15b22633a1cc95fdc5f0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_simt.cu @@ -0,0 +1,1022 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "mma_pipelined_testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +// sgemm_NT +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_sgemm, sgemm_nt_32x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + float, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + float, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + float, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass, + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_sgemm, sgemm_nt_64x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + float, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + float, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + float, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + 
problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_sgemm, sgemm_nt_32x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + float, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + float, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + float, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_sgemm, sgemm_nt_64x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + float, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + float, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + float, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 128, 16); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); 
+} + +TEST(SM50_sgemm, sgemm_nt_128x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + float, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + float, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + float, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// dgemm_NN +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_dgemm, dgemm_nt_32x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + double, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + double, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + double, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, 
block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_dgemm, dgemm_nt_64x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + double, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + double, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + double, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_dgemm, dgemm_nt_32x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + double, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + double, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + double, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_dgemm, dgemm_nt_64x128x8_32x64x1) { + using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + double, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + double, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + double, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 128, 16); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_dgemm, dgemm_nt_128x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + double, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + double, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + double, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// igemm_NN 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_igemm, igemm_nt_32x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + int, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_igemm, igemm_nt_64x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + int, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_igemm, igemm_nt_32x128x8_32x64x1) { + using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + int, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_igemm, igemm_nt_64x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + int, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 128, 16); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_igemm, igemm_nt_128x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // 
WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + int, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// hgemm_NN +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_hgemm, hgemm_nt_32x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + cutlass::half_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + cutlass::half_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + cutlass::half_t, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_hgemm, hgemm_nt_64x64x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + 
cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + cutlass::half_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + cutlass::half_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + cutlass::half_t, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_hgemm, hgemm_nt_32x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + cutlass::half_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + cutlass::half_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + cutlass::half_t, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(32, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_hgemm, hgemm_nt_64x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 
8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + cutlass::half_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + cutlass::half_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + cutlass::half_t, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 128, 16); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM50_hgemm, hgemm_nt_128x128x8_32x64x1) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, + cutlass::half_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + cutlass::half_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + cutlass::half_t, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 128, 48); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +// igemm_NT DP4A +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x4) { + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_64x64x32_64x64x4) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 4096); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 
16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_128x64x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_128x128x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // 
LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 128, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_256x128x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(256, 256, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_128x256x64_64x64x16) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // 
LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 256, 64); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nt_256x128x64_64x64x16) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(256, 128, 64); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x4) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + 
cutlass::gemm::GemmCoord problem_size(64, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_64x64x32_64x64x4) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 4096); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + 
test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} +TEST(SM61_igemm, igemm_int8_tn_128x64x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 2, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_128x128x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 128, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, 
cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_256x128x16_64x64x8) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(256, 256, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_128x256x64_64x64x16) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(128, 256, 64); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_tn_256x128x64_64x64x16) { + 
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(256, 128, 64); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 8, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm, igemm_int8_nn_64x64x16_64x64x4) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, + cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2, // Stages, + cutlass::arch::OpMultiplyAdd // Operator, + >; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 1, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_slicedk.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_slicedk.cu new file mode 100644 index 0000000000000000000000000000000000000000..af1d61d0f5a4ff8cb7ad9413615ed8c57b6e4b4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_slicedk.cu @@ -0,0 +1,186 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Unit tests for CTA-level GEMM specifically for sliced-k kernels (SM_61 and SM_75) +*/ + +#include "mma_pipelined_testbed_slicedk.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +// igemm_NT DP4A +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM61_igemm_sliced_k, igemm_int8_nt_32x32x128_32x32x4) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 32, 128>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 32, 32>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2>; // Stages, + + cutlass::gemm::GemmCoord problem_size(32, 32, 128); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +TEST(SM61_igemm_sliced_k_big, igemm_int8_nt_32x32x128_32x32x4_bigk) { + using MmaCore 
= typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 32, 128>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 32, 32>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2>; // Stages, + + cutlass::gemm::GemmCoord problem_size(32, 32, 1024); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + + +TEST(SM61_igemm_sliced_k, igemm_int8_nt_32x64x128_32x32x4) { + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + cutlass::gemm::GemmShape<32, 64, 128>, // ThreadblockShape, + cutlass::gemm::GemmShape<32, 32, 64>, // WarpShape, + cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, + int8_t, // ElementA, + cutlass::layout::ColumnMajor, // LayoutA, + int8_t, // ElementB, + cutlass::layout::RowMajor, // LayoutB, + int, // ElementC, + cutlass::layout::RowMajor, // LayoutC, + cutlass::arch::OpClassSimt, // OpClass + 2>; // Stages, + + cutlass::gemm::GemmCoord problem_size(32, 64, 256); + float alpha = 1.f; + float beta = 0.0f; + dim3 grid(1, 1); + dim3 block(32, 4, 1); + test::gemm::threadblock::Testbed( + problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) + .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); +} + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Tensor Op GEMM for SM_75 +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_gemm_threadblock_congruous_sliced, tensor_op_64x64x256_tb64x64x64_warp64x32x32_16x8x8) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpMultiplyAdd>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_crosswise_sliced, tensor_op_64x64x256_tb64x64x64_warp64x32x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + 
cutlass::arch::OpMultiplyAdd>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..3374263b5ea9b5b3a230e5ca227a2134db08c560 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm70.cu @@ -0,0 +1,498 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#include "mma_pipelined_testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_64x64x32_64x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_128x128x32_64x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_64x64x32_32x32x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, 
ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_128x64x32_64x32x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_128x64x64_64x32x64_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 
64>; + using OperatorShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, OperatorShape, ElementA, LayoutA, ElementB, + LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_64x128x32_32x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_congruous, tensor_op_256x128x32_32x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = 
cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_64x64x32_64x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_128x128x32_64x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_256x128x32_64x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, 
WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_64x64x32_32x32x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_128x64x32_64x32x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + 
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_128x64x64_64x32x64_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using OperatorShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, OperatorShape, ElementA, LayoutA, ElementB, + LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_crosswise, tensor_op_64x128x32_32x64x32_8x8x4) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = 
cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +#endif // CUTLASS_ARCH_MMA_SM70_SUPPORTED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..ff170ca355b98cde8f12cb53da20fb4a2b95fbb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm75.cu @@ -0,0 +1,2128 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for threadblock-level GEMM +*/ + +#include "mma_pipelined_testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_congruous, tensor_op_64x64x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_congruous, tensor_op_128x64x32_64x32x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using 
MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_congruous, tensor_op_64x128x32_32x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_congruous, tensor_op_128x128x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = 
cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_congruous, + multicta_256x256x96_128x128x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_congruous, + multicta_512x256x384_256x128x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using 
LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + 
problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x32_16x16x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 32>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x32_16x32x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x32_32x16x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x32_32x32x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using 
InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x32_64x32x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x32_32x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; 
+ using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_gemm_threadblock_crosswise, + multicta_256x256x96_128x128x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + multicta_512x256x384_256x128x32_64x64x32_16x8x8) { + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 384); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + 
cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x32x64_16x16x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 64>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x32x64_32x16x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 
16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x64x64_16x32x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x64x64_32x32x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x64x64_64x32x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore component + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x128x64_32x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x128x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, 
cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, + multicta_256x256x192_128x128x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 192); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, + multicta_512x256x768_256x128x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<32>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using 
InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x64_16x16x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = 
cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 64>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x64_32x16x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, 
tensor_op_32x64x64_16x32x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x64_32x32x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), 
problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x64_64x32x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore component + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x64_32x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 256); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + multicta_256x256x192_128x128x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 192); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using 
InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + multicta_512x256x768_256x128x64_64x64x64_8x8x16) { + using ElementA = uint8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = uint8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 768); + + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = 
int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x128_16x16x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 128>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x128_32x16x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x128_16x32x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 
grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x128_32x32x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x128_64x32x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // 
Define the MmaCore component + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x128_32x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord 
problem_size(128, 128, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + multicta_256x256x384_128x128x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + 
multicta_512x256x1536_256x128x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 1536); + + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x32x128_16x16x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 128>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + 
+ dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x32x128_32x16x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x64x128_16x32x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 128>; + using InstructionShape = 
cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x64x128_32x32x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x64x128_64x32x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = 
cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore component + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x128x128_32x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x128x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 512); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, + multicta_256x256x384_128x128x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 384); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_interleaved, + multicta_512x256x1536_256x128x128_64x64x128_8x8x32) { + using ElementA = cutlass::uint4b_t; + using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>; + using ElementB = cutlass::uint4b_t; + using LayoutB = cutlass::layout::RowMajorInterleaved<64>; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 1536); + + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 
2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x512_16x16x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 512>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, 
tensor_op_64x32x512_32x16x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x512_16x32x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + 
cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x512_32x32x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x512_64x32x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = 
cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore component + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x512_32x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB 
= cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + multicta_256x256x1536_128x128x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 1536); + + using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), 
alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_crosswise, + multicta_512x256x6144_256x128x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 6144); + + using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2, + cutlass::arch::OpXorPopc>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..6e91c01f240b45af0716f1a2700cdaeb15304ebf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm80.cu @@ -0,0 +1,569 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for threadblock-level GEMM +*/ + +#include "mma_pipelined_testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, tensor_op_64x64x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, tensor_op_128x64x16_64x32x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore 
components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, tensor_op_64x128x16_32x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, tensor_op_128x128x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 64); + + 
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_256x256x96_128x128x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_congruous, + multicta_512x256x192_256x128x16_64x64x16_16x8x4) { + using ElementA = 
cutlass::tfloat32_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_64x64x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), 
problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_32x32x16_16x16x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 32, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_32x64x16_16x32x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(32, 64, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<16, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_64x32x16_32x16x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 32, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_64x64x16_32x32x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 64); + + using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_128x64x16_64x32x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 64, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_64x128x16_32x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; 
+ using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(64, 128, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, tensor_op_128x128x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(128, 128, 48); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); 
+} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_256x256x48_128x128x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(256, 256, 48); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_crosswise, + multicta_512x256x192_256x128x16_64x64x16_16x8x4) { + using ElementA = cutlass::tfloat32_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::tfloat32_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::ColumnMajor; + + cutlass::gemm::GemmCoord problem_size(512, 256, 192); + + using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>; + + dim3 grid(2, 2); + dim3 block(32, 8, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..ac088c28aab3ba1fce350614bf4d8b0bc4c5510f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed.h @@ -0,0 +1,353 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit testbed for kernel-level GEMM +*/ + +#pragma once + +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_fill.h" + +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" +#include "cutlass/cutlass.h" +#include "cutlass/platform/platform.h" + +namespace test { +namespace gemm { +namespace threadblock 
{ + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void kernel_mma(cutlass::gemm::GemmCoord problem_size, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Mma::ElementC *ptr_C, + typename Mma::LayoutC::Stride::Index ldc) { + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + __shared__ typename Mma::SharedStorage shared_storage; + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), + 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k()}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN}; + + // Compute position within threadblock + int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A(params_A, ref_A.data(), + {problem_size.m(), problem_size.k()}, + tb_thread_id, tb_offset_A); + + typename Mma::IteratorB iterator_B(params_B, ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + int warp_id = threadIdx.y; + int lane_id = threadIdx.x; + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x); + + typename Mma::FragmentC accum; + + accum.clear(); + + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + + // Output results + typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id); + + iterator_C.add_tile_offset( + {(tb_tile_offset.m() * Mma::WarpCount::kM) + + (warp_id % Mma::WarpCount::kM), + (tb_tile_offset.n() * Mma::WarpCount::kN) + + (warp_id / 
Mma::WarpCount::kM)}); + + iterator_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Threadblock-level matrix multiply-accumulate + typename MmaCore_, + /// Number of stages + int Stages = 2> +struct Testbed { + /// Threadblock-level GEMM implementation + using MmaCore = MmaCore_; + using ThreadblockShape = typename MmaCore::Shape; + using WarpShape = typename MmaCore::WarpShape; + using InstructionShape = typename MmaCore::InstructionShape; + using ElementA = typename MmaCore::ElementA; + using LayoutA = typename MmaCore::LayoutA; + using ElementB = typename MmaCore::ElementB; + using LayoutB = typename MmaCore::LayoutB; + using ElementC = typename MmaCore::ElementC; + using LayoutC = typename MmaCore::LayoutC; + static const int kStages = Stages; + + // Define iterators over tiles from the A operand + static const bool use_idp4a = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value; + + static const bool transposeA = cutlass::platform::is_same< LayoutA, cutlass::layout::ColumnMajor >::value; + static const bool transposeB = cutlass::platform::is_same< LayoutB, cutlass::layout::RowMajor >::value; + + using IteratorA = typename cutlass::platform::conditional< use_idp4a, + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA> , + + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA> + >::type; + + // Define iterators over tiles from the B operand + using IteratorB = typename cutlass::platform::conditional< use_idp4a, + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, 
transposeB> , + + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB> + >::type; + + // Define MmaPipeline Single Stage + using MmaPipelineSingleStage = cutlass::gemm::threadblock::MmaSingleStage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC, + typename MmaCore::MmaPolicy>; + + // Define MmaPipeline Two Stages + using MmaPipelineTwoStages = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC, + typename MmaCore::MmaPolicy>; + + // Define the threadblock-scoped pipelined matrix multiply (Select between Single vs. Two stages) + using Mma = typename cutlass::platform::conditional<(kStages==1), MmaPipelineSingleStage, MmaPipelineTwoStages>::type; + // + // Data members + // + + cutlass::HostTensor matrix_A; + cutlass::HostTensor matrix_B; + cutlass::HostTensor matrix_C_computed; + cutlass::HostTensor matrix_C_reference; + + cutlass::gemm::GemmCoord problem_size; + float alpha, beta; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed(int m, int n, int k, float alpha_, float beta_) + : problem_size(m, n, k), alpha(alpha_), beta(beta_) { + matrix_A.reset(cutlass::make_Coord(m, k)); + matrix_B.reset(cutlass::make_Coord(k, n)); + matrix_C_computed.reset(cutlass::make_Coord(m, n)); + matrix_C_reference.reset(cutlass::make_Coord(m, n), false); + } + + bool sufficient() { + return true; + } + + /// Runs the test + bool run( + dim3 grid, dim3 block, + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + // Waive test if insufficient CUDA device + if (!sufficient()) { + if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { + std::cerr << "Test waived due to insufficient 
CUDA device." << std::endl; + } + return true; + } + + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), + matrix_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), + matrix_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill(matrix_C_computed.host_view()); + + cutlass::reference::host::TensorFill(matrix_C_reference.host_view()); + + matrix_A.sync_device(); + matrix_B.sync_device(); + matrix_C_computed.sync_device(); + + typename IteratorA::Params params_A(matrix_A.layout()); + typename IteratorB::Params params_B(matrix_B.layout()); + + test::gemm::threadblock::kernel_mma<<>>( + problem_size, params_A, matrix_A.device_ref(), params_B, + matrix_B.device_ref(), 
matrix_C_computed.device_data(), + matrix_C_computed.layout().stride(0)); + + // + // Check error code + // + + cudaError_t result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) + << " kernel error: " << cudaGetErrorString(result) << " on device " << GetCudaDevice(); + + matrix_C_computed.sync_host(); + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + problem_size, ElementC(alpha), matrix_A.host_view(), + matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view()); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed.host_view(), matrix_C_reference.host_view()); + + EXPECT_TRUE(passed) << "Failed on device " << GetCudaDevice(); + + if (!passed) { + std::ofstream output("mma_pipelined_testbed_errors.txt"); + + output + << "A:\n" << matrix_A.host_view() << "\n" + << "B:\n" << matrix_B.host_view() << "\n" + << "Reference:\n" + << matrix_C_reference.host_view() << "\n" + << "Computed:\n" + << matrix_C_computed.host_view() << "\n"; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed_slicedk.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed_slicedk.h new file mode 100644 index 0000000000000000000000000000000000000000..688514ca5d38d75f9798d5d15b2e10b852b6efe2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed_slicedk.h @@ -0,0 +1,370 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit testbed for kernel-level GEMM +*/ + +#pragma once + +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_fill.h" + +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" +#include "cutlass/cutlass.h" +#include "cutlass/platform/platform.h" + +namespace test { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void kernel_mma(cutlass::gemm::GemmCoord problem_size, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Mma::ElementC **ptr_C, + typename Mma::LayoutC::Stride::Index ldc) { + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + __shared__ typename Mma::SharedStorage shared_storage; + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), + 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k()}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN}; + + // Compute position 
within threadblock + int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A(params_A, ref_A.data(), + {problem_size.m(), problem_size.k()}, + tb_thread_id, tb_offset_A); + + typename Mma::IteratorB iterator_B(params_B, ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + int warp_id = threadIdx.y; + int lane_id = threadIdx.x; + + int partitionsK_idx = warp_id / (Mma::WarpCount::kM * Mma::WarpCount::kN); + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x); + + typename Mma::FragmentC accum; + + accum.clear(); + + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + + // Output results + typename Mma::Operator::IteratorC iterator_C({ptr_C[partitionsK_idx], ldc}, lane_id); + + + int warp_idx_mn = warp_id % (Mma::WarpCount::kM * Mma::WarpCount::kN); + iterator_C.add_tile_offset( + {(tb_tile_offset.m() * Mma::WarpCount::kM) + + (warp_idx_mn % Mma::WarpCount::kM), + (tb_tile_offset.n() * Mma::WarpCount::kN) + + (warp_idx_mn / Mma::WarpCount::kM)}); + + iterator_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Threadblock-level matrix multiply-accumulate + typename MmaCore_> +struct Testbed { + /// Threadblock-level GEMM implementation + using MmaCore = MmaCore_; + using ThreadblockShape = typename MmaCore::Shape; + using WarpShape = typename MmaCore::WarpShape; + using InstructionShape = typename MmaCore::InstructionShape; + using ElementA = typename MmaCore::ElementA; + using LayoutA = typename MmaCore::LayoutA; + using ElementB = typename MmaCore::ElementB; + using LayoutB = typename MmaCore::LayoutB; + using 
ElementC = typename MmaCore::ElementC; + using LayoutC = typename MmaCore::LayoutC; + + // Define iterators over tiles from the A operand + static const bool use_idp4a = cutlass::platform::is_same::value && + cutlass::platform::is_same::value && + cutlass::platform::is_same::value; + + static const bool transposeA = cutlass::platform::is_same< LayoutA, cutlass::layout::ColumnMajor >::value; + static const bool transposeB = cutlass::platform::is_same< LayoutB, cutlass::layout::RowMajor >::value; + + using IteratorA = typename cutlass::platform::conditional< use_idp4a, + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA> , + + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA> + >::type; + + // Define iterators over tiles from the B operand + using IteratorB = typename cutlass::platform::conditional< use_idp4a, + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB> , + + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB> + >::type; + + // Define the threadblock-scoped pipelined matrix multiply + using Mma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC, + typename MmaCore::MmaPolicy>; + + static int const kPartitionsK = MmaCore::MmaPolicy::kPartitionsK; + + // + // Data members + // + + cutlass::HostTensor matrix_A; + cutlass::HostTensor matrix_B; + cutlass::HostTensor matrix_C_computed[kPartitionsK]; + cutlass::HostTensor matrix_C_reference; + cutlass::HostTensor matrix_C_pointers; + + cutlass::gemm::GemmCoord problem_size; + float 
alpha, beta; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed(int m, int n, int k, float alpha_, float beta_) + : problem_size(m, n, k), alpha(alpha_), beta(beta_) { + matrix_A.reset(cutlass::make_Coord(m, k)); + matrix_B.reset(cutlass::make_Coord(k, n)); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_computed[k].reset(cutlass::make_Coord(m, n)); + + matrix_C_reference.reset(cutlass::make_Coord(m, n), false); + matrix_C_pointers.reset(cutlass::Coord<1>(kPartitionsK)); + } + + /// Runs the test + bool run( + dim3 grid, dim3 block, + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), + matrix_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + 
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), + matrix_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); + } else { + return false; + } + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + cutlass::reference::host::TensorFill(matrix_C_computed[k].host_view()); + + cutlass::reference::host::TensorFill(matrix_C_reference.host_view()); + + matrix_A.sync_device(); + matrix_B.sync_device(); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_computed[k].sync_device(); + + typename IteratorA::Params params_A(matrix_A.layout()); + typename IteratorB::Params params_B(matrix_B.layout()); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_pointers.at(cutlass::Coord<1>(k)) = matrix_C_computed[k].device_data(); + + matrix_C_pointers.sync_device(); + + test::gemm::threadblock::kernel_mma<<>>( + problem_size, params_A, matrix_A.device_ref(), params_B, + matrix_B.device_ref(), matrix_C_pointers.device_data(), + matrix_C_computed[0].layout().stride(0)); + + // + // Check error code + // + + cudaError_t result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) + << " kernel error: " << cudaGetErrorString(result); + + CUTLASS_PRAGMA_UNROLL + for(int k = 0; k < kPartitionsK; k++) + matrix_C_computed[k].sync_host(); + + // TODO: this is temporary. 
it will be removed after slicing can de + // reduction + // + // Reduce matrix_C_computed + // + CUTLASS_PRAGMA_UNROLL + for(int k = 1; k < kPartitionsK; k++) { + CUTLASS_PRAGMA_UNROLL + for(int m = 0; m < matrix_C_computed[0].extent().row(); m++){ + CUTLASS_PRAGMA_UNROLL + for(int n = 0; n < matrix_C_computed[0].extent().column(); n++){ + matrix_C_computed[0].at({m, n}) += matrix_C_computed[k].at({m, n}); + } + } + } + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + problem_size, ElementC(alpha), matrix_A.host_view(), + matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view()); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed[0].host_view(), matrix_C_reference.host_view()); + + EXPECT_TRUE(passed); + + if (!passed) { + std::ofstream output("mma_pipelined_testbed_errors.txt"); + + output + << "A:\n" << matrix_A.host_view() << "\n" + << "B:\n" << matrix_B.host_view() << "\n" + << "Reference:\n" + << matrix_C_reference.host_view() << "\n" + << "Computed:\n" + << matrix_C_computed[0].host_view() << "\n"; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..28dc1c8736f18013cc4c49b92c48a07e4ba4317e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm70.cu @@ -0,0 +1,766 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include "mma_pipelined_testbed.h" +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" + +/// All tests use double-buffered (kStages=2) mma pipeline for the gemm mainloop +/// Test name format: SM[arch]_gemm_threadblock_wmma_tensor_op_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape] + +//////////////// [START] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [START] ////////////////////// + +/////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16 (wmma native size 16x16x16) +//////////////////////////////////////////////////////////// + +// tests for {N,T}x{N,T}=>{T} +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 (wmma native size 16x16x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_col_row_row_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM70_gemm_threadblock_wmma_tensor_op_col_row_row_f16, 128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(128, 128, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = 
cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 (wmma native size 16x16x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_row_row_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_row_row_f16, 
128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(128, 128, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.col.m16n16k16.f16.f16 (wmma native size 16x16x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_col_col_row_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float 
beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM70_gemm_threadblock_wmma_tensor_op_col_col_row_f16, 128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(128, 128, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +// tests for {N,T}x{N,T}=>{N} +/////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16 (wmma native size 16x16x16) +//////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_col_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using 
LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 (wmma native size 16x16x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_col_row_col_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 (wmma native size 16x16x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_row_col_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.col.m16n16k16.f16.f16 (wmma native size 
16x16x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_col_col_col_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +//////////////// [END] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [END] ////////////////////// + +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f16, 128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(128, 128, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the 
MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f16, multicta_256x256x96_128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 (wmma native size 32x8x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_32x8x16) { + + using ElementA = cutlass::half_t; + using LayoutA = 
cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 (wmma native size 8x32x16) +////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_8x32x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +////////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 (wmma native size 16x16x16) +////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using 
LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f32, multicta_256x256x96_128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+/////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 (wmma native size 32x8x16) +//////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 (wmma native size 8x32x16) +///////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_8x32x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const 
int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..857b8c6e05e49d3c589f85c4d01a89eb6edcd0cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu @@ -0,0 +1,337 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED +#include "mma_pipelined_testbed.h" +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" + +/// All tests use double-buffered (kStages=2) mma pipeline for the gemm mainloop +/// Test name format: SM[arch]_gemm_threadblock_wmma_tensor_op_[alayout]_[blayout]_[clayout]_[atype].[threadblock_shape]_[warp_shape]_[instruction_shape] + +///////////////////////////////////////////////////////////////////////// +/// Integer (s8 and u8) WMMA threadblock level tests ///// +///////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED) +TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8, 64x64x32_64x64x32_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8, 64x64x64_64x64x64_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = 
int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +TEST(SM75_gemm_threadblock_wmma_tensor_op_col_row_row_s8, 64x64x32_64x64x32_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+TEST(SM75_gemm_threadblock_wmma_tensor_op_col_row_row_s8, 64x64x64_64x64x64_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} +#endif //CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED + + +//////////////////////////////////////////////////////////////////////// +/// SUBBYTE (s4 and b1) WMMA threadblock level tests //// +/////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED) +TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s4, 64x64x128_64x64x128_8x8x32) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = 
cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_col_s4, 64x64x64_64x64x64_8x8x32) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_b1, 64x64x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 2; + + 
cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_col_b1, 64x64x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 2; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED + +#endif //CUTLASS_ARCH_WMMA_SM75_ENABLED diff 
--git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_planar_complex_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_planar_complex_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..0b6dc11db5b344017d1672b884077ac09e12b8cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_planar_complex_sm80.cu @@ -0,0 +1,79 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for threadblock-level GEMM +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h" + +#include "mma_planar_complex_testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_gemm_threadblock_planar_complex_congruous, tensor_op_64x64x32_64x64x32_16x8x16_3stage) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + + cutlass::gemm::GemmCoord problem_size(64, 64, 8); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + int const Stages = 3; + + // Define the MmaCore components + using Mma = typename cutlass::gemm::threadblock::DefaultMmaPlanarComplexMultistage< + ElementA, LayoutA, 8, + ElementB, LayoutB, 8, + ElementC, LayoutC, + cutlass::arch::OpClassTensorOp, + cutlass::arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages>::ThreadblockMma; + + dim3 grid(1, 1); + dim3 block(32, Mma::WarpCount::kCount, 1); + + 
test::gemm::threadblock::TestbedPlanarComplex(problem_size.m(), problem_size.n(), + problem_size.k()) + .run(grid, block); +} + +//////////////////////////////////////////////////////////////////////////////// +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_planar_complex_testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_planar_complex_testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..45b50fac9e4b0f7b8a3b328cc3920501313e5d7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_planar_complex_testbed.h @@ -0,0 +1,350 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit testbed for kernel-level GEMM +*/ + +#pragma once + +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/cutlass.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor_planar_complex.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/gemm_planar_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_fill.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void kernel_mma_planar_complex( + cutlass::gemm::GemmCoord problem_size, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::Element *ptr_A, + int64_t imaginary_stride_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::Element *ptr_B, + int64_t imaginary_stride_B, + typename Mma::ElementC 
*ptr_C, + typename Mma::LayoutC::Stride::Index ldc, int64_t imaginary_stride_C) { + + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + __shared__ typename Mma::SharedStorage shared_storage; + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), + 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k()}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN}; + + // Compute position within threadblock + int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + // Construct iterators to A operand + typename Mma::IteratorA iterator_A_real(params_A, ptr_A, + {problem_size.m(), problem_size.k()}, + tb_thread_id, tb_offset_A); + + typename Mma::IteratorA iterator_A_imag(params_A, ptr_A + imaginary_stride_A, + {problem_size.m(), problem_size.k()}, + tb_thread_id, tb_offset_A); + + // Construct iterators to B operand + typename Mma::IteratorB iterator_B_real(params_B, ptr_B, + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + typename Mma::IteratorB iterator_B_imag(params_B, ptr_B + imaginary_stride_B, + {problem_size.k(), problem_size.n()}, + tb_thread_id, tb_offset_B); + + int warp_id = threadIdx.y; + int lane_id = threadIdx.x; + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x); + + typename Mma::FragmentC accum; + + accum.clear(); + + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag, accum); + + // Output results + typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id); + + iterator_C.add_tile_offset( + {(tb_tile_offset.m() * Mma::WarpCount::kM) + + (warp_id % Mma::WarpCount::kM), + (tb_tile_offset.n() * Mma::WarpCount::kN) 
+ + (warp_id / Mma::WarpCount::kM)}); + + iterator_C.store(accum.real); + + iterator_C.store_with_pointer_offset(accum.imag, imaginary_stride_C); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Threadblock-level matrix multiply-accumulate + typename Mma_> +struct TestbedPlanarComplex { + + using Mma = Mma_; + using ThreadblockShape = typename Mma::Shape; + using IteratorA = typename Mma::IteratorA; + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using IteratorB = typename Mma::IteratorB; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Mma::ElementC; + using ElementAccumulator = typename Mma::ElementC; + using LayoutC = typename Mma::LayoutC; + using ThreadMapA = typename Mma::IteratorA::ThreadMap; + using ThreadMapB = typename Mma::IteratorB::ThreadMap; + using AccessTypeA = cutlass::Array; + using AccessTypeB = cutlass::Array; + static int const Stages = Mma::kStages; + static cutlass::arch::CacheOperation::Kind const CacheOpA = + Mma::kCacheOpA; + static cutlass::arch::CacheOperation::Kind const CacheOpB = + Mma::kCacheOpB; + + // + // Data members + // + + cutlass::HostTensorPlanarComplex matrix_A; + cutlass::HostTensorPlanarComplex matrix_B; + cutlass::HostTensorPlanarComplex matrix_C_computed; + cutlass::HostTensorPlanarComplex matrix_C_reference; + + cutlass::gemm::GemmCoord problem_size; + + // + // Methods + // + + /// Allocates workspace in device memory + TestbedPlanarComplex(int m, int n, int k) + : problem_size(m, n, k) { + + matrix_A.reset(cutlass::make_Coord(m, k)); + matrix_B.reset(cutlass::make_Coord(k, n)); + matrix_C_computed.reset(cutlass::make_Coord(m, n)); + matrix_C_reference.reset(cutlass::make_Coord(m, n), false); + } + + /// Runs the test + bool run( + dim3 grid, dim3 block, + 
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_A.host_view(), seed, scope_max, scope_min, 0); + + } else if (init_A == cutlass::Distribution::Sequential) { + + for (int i = 0; i < matrix_A.capacity() * 2; ++i) { + matrix_A.host_data()[i] = cutlass::half_t(float(i % 5) - 2); + } + /* + cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), + matrix_A.capacity() * 2); + */ + } else if (init_A == cutlass::Distribution::Identity) { + //cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + + + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); + + + } else if (init_B == cutlass::Distribution::Sequential) { + + cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), + matrix_B.capacity() * 2); + + for (int i = 0; i < matrix_B.capacity() * 2; ++i) { + matrix_B.host_data()[i] = cutlass::half_t(float((i + 3) % 5) - 2); + } + + + } else if (init_B == cutlass::Distribution::Identity) { + + //cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); + + } else { + return false; + } + + matrix_A.sync_device(); + matrix_B.sync_device(); + matrix_C_computed.sync_device(); + + typename 
IteratorA::Params params_A(matrix_A.layout()); + typename IteratorB::Params params_B(matrix_B.layout()); + + test::gemm::threadblock::kernel_mma_planar_complex<<>>( + problem_size, + params_A, + matrix_A.device_data(), + matrix_A.imaginary_stride(), + params_B, + matrix_B.device_data(), + matrix_B.imaginary_stride(), + matrix_C_computed.device_data(), + matrix_C_computed.layout().stride(0), + matrix_C_computed.imaginary_stride() + ); + + + // + // Check error code + // + + cudaError_t result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) + << " kernel error: " << cudaGetErrorString(result); + + matrix_C_computed.sync_host(); + + cutlass::reference::host::GemmPlanarComplex< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementAccumulator + >( + problem_size, + cutlass::complex(ElementAccumulator(1)), + matrix_A.host_ref(), + Mma::kTransformA, + matrix_B.host_ref(), + Mma::kTransformB, + cutlass::complex(ElementAccumulator(0)), + matrix_C_reference.host_ref(), + matrix_C_reference.host_ref() + ); + + bool passed = cutlass::reference::host::TensorEquals( + matrix_C_computed.host_view(), + matrix_C_reference.host_view() + ); + + EXPECT_TRUE(passed); + + if (!passed) { + std::ofstream output("mma_pipelined_testbed_errors.txt"); + + output + << "A:\n" << matrix_A.host_view() << "\n" + << "B:\n" << matrix_B.host_view() << "\n" + << "Reference:\n" + << matrix_C_reference.host_view() << "\n" + << "Computed:\n" + << matrix_C_computed.host_view() << "\n"; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_singlestage_wmma_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_singlestage_wmma_sm70.cu new file mode 100644 index 
0000000000000000000000000000000000000000..06a3ebb1cf50ffbaeab77584a8dd5be2aa620907 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_singlestage_wmma_sm70.cu @@ -0,0 +1,417 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED +#include "mma_pipelined_testbed.h" +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" + +/// All tests use single staged (kStages=1) mma pipeline for the gemm mainloop +/// Test name format: SM[arch]_gemm_threadblock_singlestage_wmma_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape] + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +/// WMMA Floating point (f16 accumulation) - Single stage - Threadblock level tests //// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 32); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(128, 128, 64); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, multicta_256x256x96_128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, 
+ ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +/////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 (wmma native size 32x8x16) +/////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_32x8x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 (wmma native size 8x32x16) +////////////////////////////////////////////////////////////////////////////// 
+TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_8x32x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = cutlass::half_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +/// WMMA Floating point (f32 accumulation) - Single stage - Threadblock level tests //// +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 (wmma native size 16x16x16) +////////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + 
using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(128, 128, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + 
+TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, multicta_256x256x96_128x128x32_64x64x32_16x16x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(256, 256, 96); + + using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(2, 2); + dim3 block(32, 4, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +/////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 (wmma native size 32x8x16) +//////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_32x8x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<32, 
8, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +///////////////////////////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 (wmma native size 8x32x16) +///////////////////////////////////////////////////////////////////////////////// +TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_8x32x16) { + + using ElementA = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::half_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = float; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_singlestage_wmma_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_singlestage_wmma_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..b5fbd15a5621d9c8de0269928397d6a88702fdca --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/threadblock/mma_singlestage_wmma_sm75.cu @@ -0,0 +1,336 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ +#include "cutlass/arch/wmma.h" + +#ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED +#include "mma_pipelined_testbed.h" +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" + +/// All tests use single staged (kStages=1) mma pipeline for the gemm mainloop +/// Test name format: SM[arch]_gemm_threadblock_singlestage_wmma_tensor_op_[alayout]_[blayout]_[clayout]_[atype].[threadblock_shape]_[warp_shape]_[instruction_shape] + +///////////////////////////////////////////////////////////////////////// +/// Integer (s8 and u8) WMMA threadblock level tests //// +///////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED) +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_s8, 64x64x32_64x64x32_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + 
float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_s8, 64x64x64_64x64x64_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_col_row_row_s8, 64x64x32_64x64x32_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_col_row_row_s8, 64x64x64_64x64x64_16x16x16) { + + using ElementA = int8_t; + using LayoutA = cutlass::layout::ColumnMajor; + using ElementB = int8_t; + using LayoutB = cutlass::layout::RowMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + + float alpha = 1.f; + float beta = 0.0f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} +#endif //CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED + + +//////////////////////////////////////////////////////////////////////// +/// SUBBYTE (s4 and b1) WMMA threadblock level tests //// 
+/////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED) +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_s4, 64x64x128_64x64x128_8x8x32) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 128); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + + +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_col_s4, 64x64x64_64x64x64_8x8x32) { + using ElementA = cutlass::int4b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::int4b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 64); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_b1, 64x64x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::RowMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} + +TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_col_b1, 64x64x512_64x64x512_8x8x128) { + using ElementA = cutlass::uint1b_t; + using LayoutA = cutlass::layout::RowMajor; + using ElementB = cutlass::uint1b_t; + using LayoutB = cutlass::layout::ColumnMajor; + using ElementC = int32_t; + using LayoutC = cutlass::layout::ColumnMajor; + static const int kStages = 1; + + cutlass::gemm::GemmCoord problem_size(64, 64, 2048); + + using ThreadBlockShape = 
cutlass::gemm::GemmShape<64, 64, 512>; + using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + + float alpha = 1.f; + float beta = 0.f; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadBlockShape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpClassWmmaTensorOp, kStages, + cutlass::arch::OpXorPopc>; + + dim3 grid(1, 1); + dim3 block(32, 1, 1); + + test::gemm::threadblock::Testbed(problem_size.m(), problem_size.n(), + problem_size.k(), alpha, beta) + .run(grid, block); +} +#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED + +#endif //CUTLASS_ARCH_WMMA_SM75_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1415da43aea4da2d9dfb5be77647f7ee9d0bc6e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/CMakeLists.txt @@ -0,0 +1,45 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_gemm_warp + gemm_sm50.cu + gemm_sm60.cu + gemm_sm61.cu + gemm_sm70.cu + gemm_sm75.cu + gemm_sm80.cu + gemm_complex_sm80.cu + gemm_sparse_sm80.cu + gemm_gaussian_complex_sm80.cu + gemm_sm90.cu + gemm_complex_sm90.cu + wmma_sm70.cu + wmma_sm72.cu + wmma_sm75.cu +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_complex_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_complex_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..28410ad0e8ed7798c59b8615513aeed9d217bc21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_complex_sm80.cu @@ -0,0 +1,698 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + + \brief Unit tests for thread-level GEMM +*/ + +#include "cutlass/cutlass.h" +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// complex * complex => complex +// Input data type: complex +// Math instruction: mma.sync.aligned.m8n8k4.f64.f64.f64.f64 +// Output data type: complex +/////////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<8, 8, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = 
cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x32x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x16x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nh) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_ct) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_tn) { + + using Shape = 
cutlass::gemm::GemmShape<8, 8, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// complex * complex => complex +// Input data type: complex +// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +// Output data type: complex +// Shared memory layout: Congrous +//////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using 
ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x32x8_16x8x8_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 8> >() + .run(); +} + 
+TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x16x8_16x16x8_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 16, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 16, 8> >() + .run(); +} + + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nh) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + 
InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_ct) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() + .run(); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// complex * complex => complex +// Input data type: complex +// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +// Output data type: complex +// Shared memory layout: Crosswise +//////////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_tn) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >() + .run(); +} + +// TEST FAILS crosswise complex TN mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 test fails for k = 2*8 = 16 +TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_tn) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_tn) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x64x8_16x8x8_tn) { + + using Shape = cutlass::gemm::GemmShape<32, 64, 
8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 64, 8> >() + .run(); +} + +TEST(SM80_warp_gemm_complex_tensor_op_f32, 64x32x8_16x8x8_tn) { + + using Shape = cutlass::gemm::GemmShape<64, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<64, 32, 8> >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + 
cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TransformedTestbedComplex< + MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_complex_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_complex_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..a1707de1bdc0f9337950e284a451f01958a58291 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_complex_sm90.cu @@ -0,0 +1,332 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + + \brief Unit tests for thread-level GEMM with Hopper FP64 +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +TEST(SM90_warp_gemm_complex_tensor_op_f64, 16x8x4_16x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 8, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 16x16x4_16x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 
16x32x4_16x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 32x16x4_16x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 32x32x4_16x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + 
+TEST(SM90_warp_gemm_complex_tensor_op_f64, 32x32x4_16x8x4_nh) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 32x32x4_16x8x4_ct) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 16x8x4_16x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<16, 8, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 16x16x4_16x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 32x32x16_16x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex().run(); +} + +TEST(SM90_warp_gemm_complex_tensor_op_f64, 64x64x4_16x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<64, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor + >::Type; + + test::gemm::warp::TestbedComplex().run(); +} +#endif // if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_gaussian_complex_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_gaussian_complex_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..e6f71ce64b52a10acd46c5ca6ae7d56f1259da5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_gaussian_complex_sm80.cu @@ -0,0 +1,287 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + + \brief Unit tests for thread-level GEMM +*/ + +#include "cutlass/cutlass.h" +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 8x8x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<8, 8, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = 
cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 16x16x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 16x32x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<16, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + 
test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 32x16x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 32x32x4_8x8x4_nt) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 32x32x4_8x8x4_nh) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using 
LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kConjugate, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 32x32x4_8x8x4_ct) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::ComplexTransform::kConjugate, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_gaussian_complex_tensor_op, 16x16x4_8x8x4_tn) { + + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + + using Element = cutlass::complex; + using ElementC = cutlass::complex; + + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + Shape, + InstructionShape, + Element, + LayoutA, + Element, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + 
cutlass::ComplexTransform::kNone, + cutlass::ComplexTransform::kNone, + cutlass::arch::OpMultiplyAddGaussianComplex + >::Type; + + test::gemm::warp::TestbedComplex >().run(); +} +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm50.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm50.cu new file mode 100644 index 0000000000000000000000000000000000000000..5a9e2e27ad1ba3d43366de13768f752718ce290e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm50.cu @@ -0,0 +1,654 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/complex.h" +#include "cutlass/quaternion.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma_simt.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +// NT SMEM layout +TEST(SM50_warp_gemm_f32_col_row_col, 32x16x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +// TN SMEM layout +TEST(SM50_warp_gemm_f32_row_col_col, 32x16x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + 
test::gemm::warp::Testbed >().run(); +} + +// TT SMEM layout +TEST(SM50_warp_gemm_f32_row_row_col, 32x16x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +// NN SMEM layout +TEST(SM50_warp_gemm_f32_col_col_col, 32x16x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +// NT SMEM layout +TEST(SM50_warp_gemm_f32_col_row_row, 16x32x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<4, 8>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 32, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +// TN SMEM layout +TEST(SM50_warp_gemm_f32_row_col_row, 16x32x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<4, 8>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 32, 8>, + 
float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// NT SMEM layout +TEST(SM50_warp_gemm_f32_col_row_col, 32x16x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f32_col_row_row, 32x16x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +// TN SMEM layout +TEST(SM50_warp_gemm_f32_row_col_col, 32x16x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f32_row_col_row, 32x16x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + 
>; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} +///////////////////////////////////////////////////////////////////////////////////////////////// +// NT SMEM layout +TEST(SM50_warp_gemm_f32_col_row_col, 32x64x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f32_col_row_row, 32x64x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<4, 8>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 64, 8>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +// TN SMEM layout +TEST(SM50_warp_gemm_f32_row_col_col, 32x64x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f32_row_col_row, 32x64x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + 
cutlass::MatrixShape<4, 8>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 64, 8>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_complex_f32_col_row_col, 64x32x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using complex_f32_t = cutlass::complex; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + complex_f32_t, + cutlass::layout::ColumnMajor, + complex_f32_t, + cutlass::layout::RowMajor, + complex_f32_t, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_complex_f32_col_row_row, 64x32x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using complex_f32_t = cutlass::complex; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + complex_f32_t, + cutlass::layout::ColumnMajor, + complex_f32_t, + cutlass::layout::RowMajor, + complex_f32_t, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_f64_col_row_col, 8x4x1_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + 
cutlass::gemm::GemmShape<8, 4, 8>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f64_col_row_row, 8x4x1_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<8, 4, 8>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_f64_col_row_col, 32x16x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f64_col_row_row, 32x16x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_f64_col_row_col, 64x32x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + 
cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_f64_col_row_row, 64x32x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_complex_f64_col_row_col, 32x16x1_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using complex_f64_t = cutlass::complex; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 16, 8>, + complex_f64_t, + cutlass::layout::ColumnMajor, + complex_f64_t, + cutlass::layout::RowMajor, + complex_f64_t, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +TEST(SM50_warp_gemm_complex_f64_col_row_row, 32x16x1_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::RowMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using complex_f64_t = cutlass::complex; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 16, 8>, + complex_f64_t, + cutlass::layout::ColumnMajor, + complex_f64_t, + cutlass::layout::RowMajor, + 
complex_f64_t, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_quaternion_f32_col_row_col, 16x8x8_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using quaternion_f32_t = cutlass::Quaternion; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 8, 8>, + quaternion_f32_t, + cutlass::layout::ColumnMajor, + quaternion_f32_t, + cutlass::layout::RowMajor, + quaternion_f32_t, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_warp_gemm_quaternion_f32_col_row_row, 16x8x8_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using quaternion_f32_t = cutlass::Quaternion; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 8, 8>, + quaternion_f32_t, + cutlass::layout::ColumnMajor, + quaternion_f32_t, + cutlass::layout::RowMajor, + quaternion_f32_t, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm60.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm60.cu new file mode 100644 index 0000000000000000000000000000000000000000..03ba3eaeecead2857955fe6cb7b779475db2ca6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm60.cu @@ -0,0 
+1,140 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma_simt.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM60_warp_gemm_f16_col_row, 8x4x1_1x1x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<8, 4, 8>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM60_warp_gemm_f16_col_row, 16x8x1_2x2x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 8, 8>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM60_warp_gemm_f16_col_row, 32x16x1_4x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 8>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + Policy 
+ >; + + test::gemm::warp::Testbed >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM60_warp_gemm_f16_col_row, 64x16x1_8x4x1) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<8, 8, 1> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 32, 8>, + cutlass::half_t, + cutlass::layout::ColumnMajor, + cutlass::half_t, + cutlass::layout::RowMajor, + cutlass::half_t, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm61.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm61.cu new file mode 100644 index 0000000000000000000000000000000000000000..c042b5b95dc515b3589b2088d4e7f2dcd2ed9275 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm61.cu @@ -0,0 +1,198 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma_simt.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM61_warp_gemm_int8_col_row, col_row_8x4x8_1x1x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<8, 4, 8>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM61_warp_gemm_int8_col_row, col_row_8x4x4_1x1x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<1, 1, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<8, 4, 8>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM61_warp_gemm_int8_col_row, col_row_16x4x4_2x1x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 1, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 4, 4>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM61_warp_gemm_int8_col_row, col_row_16x4x4_2x2x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + 
cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<2, 2, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<16, 8, 4>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM61_warp_gemm_int8_col_row, col_row_32x16x4_4x4x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<32, 16, 16>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + + +TEST(SM61_warp_gemm_int8_col_row, col_row_128x64x4_16x16x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<16, 16, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<128, 64, 4>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM61_warp_gemm_int8_col_row, col_row_64x64x4_4x4x4) { + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<8, 4>, + cutlass::layout::ColumnMajorInterleaved<2>, + cutlass::gemm::GemmShape<4, 4, 4> + >; + + using Mma = cutlass::gemm::warp::MmaSimt< + cutlass::gemm::GemmShape<64, 64, 8>, + int8_t, + cutlass::layout::ColumnMajorInterleaved<4>, + int8_t, + cutlass::layout::RowMajorInterleaved<4>, + int, + cutlass::layout::ColumnMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..6785ddbf4f05219b65cf907d7e63004fcf62f125 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm70.cu @@ -0,0 +1,295 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/mma_tensor_op_sm70.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED) + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_warp_gemm_tensor_op_congruous, 128x128x16_64x64x16_16x16x4) { + + using Shape = cutlass::gemm::GemmShape<64, 64, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous::value>; + using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous::value>; + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + cutlass::layout::ColumnMajor, + ElementB, + cutlass::layout::RowMajor, + ElementC, + cutlass::layout::RowMajor, + 
cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_gemm_tensor_op_congruous, 128x64x4_64x64x4_16x16x4) { + + using Shape = cutlass::gemm::GemmShape<64, 64, 4>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous::value>; + using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous::value>; + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + cutlass::layout::ColumnMajor, + ElementB, + cutlass::layout::RowMajor, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_gemm_tensor_op_congruous, 128x128x4_32x32x4_16x16x4) { + + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous::value>; + using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous::value>; + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + cutlass::layout::ColumnMajor, + ElementB, + cutlass::layout::RowMajor, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = 
cutlass::gemm::warp::MmaVoltaTensorOp< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_gemm_tensor_op_crosswise, 64x64x32_64x64x32_16x16x4) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + cutlass::layout::RowMajor, + ElementB, + cutlass::layout::ColumnMajor, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >().run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM70_warp_gemm_volta_tensor_op_canonical_f32_row_col, 64x64x16_64x64x4_8x8x4) { + + using Shape = cutlass::gemm::GemmShape<64, 64, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + cutlass::layout::RowMajor, + ElementB, + cutlass::layout::ColumnMajor, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + 
>; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >() + .run(); +} + +TEST(SM70_warp_gemm_volta_tensor_op_canonical_f32_col_row, 64x64x16_64x64x4_8x8x4) { + + using Shape = cutlass::gemm::GemmShape<64, 64, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + Policy + >; + + test::gemm::warp::Testbed >() + .run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#endif // CUTLASS_ARCH_MMA_SM70_SUPPORTED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..d1ac78b2006fc85aaf1f3778bff3be5d409d3f5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm75.cu @@ -0,0 +1,858 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_congruous_f16, 128x128x8_32x128x8_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 128, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_congruous_f16, 128x128x32_64x64x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + 
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_congruous_f16, 128x128x32_32x32x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x64x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x32x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = 
cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x32x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x16x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, 
InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x32_16x16x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<16, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x64x64_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x32x64_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = 
cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x32x64_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x16x64_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, 
InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_f16, 128x128x64_16x16x64_16x8x8) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST(SM75_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x64x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x32x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = 
cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x32x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x16x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i8, 128x128x64_16x16x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i8, 128x128x64_64x64x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_warp_gemm_tensor_op_interleaved_i8, 128x128x64_64x32x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i8, 128x128x64_32x32x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i8, 128x128x64_32x16x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + 
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i8, 128x128x64_16x16x64_8x8x16) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x64x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x32x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x32x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x16x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using 
Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_i4, 128x128x128_16x16x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i4, 128x128x128_64x64x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i4, 128x128x128_64x32x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i4, 128x128x128_32x32x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM75_warp_gemm_tensor_op_interleaved_i4, 128x128x128_32x16x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_interleaved_i4, 128x128x128_16x16x128_8x8x32) { + using Shape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x64x512_8x8x128) { + using Shape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< 
+ cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, + cutlass::arch::OpXorPopc>() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x32x512_8x8x128) { + using Shape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, + cutlass::arch::OpXorPopc>() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x32x512_8x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, 
ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, + cutlass::arch::OpXorPopc>() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x16x512_8x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, + cutlass::arch::OpXorPopc>() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_warp_gemm_tensor_op_crosswise_b1, 128x128x512_16x16x512_8x8x128) { + using Shape = cutlass::gemm::GemmShape<16, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, + cutlass::arch::OpXorPopc>() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// +#endif diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..4034767d69d32a0c6a8b9b2735a3fa6413385945 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm80.cu @@ -0,0 +1,1863 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x64x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() 
+ .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x16x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = 
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_16x16x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<16, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x64x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x16x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = 
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_16x16x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_64x64x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_64x32x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_32x32x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_32x16x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA 
= cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_16x16x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_64x64x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_64x32x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_32x32x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_32x16x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA 
= cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_16x16x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<16, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x32_64x64x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x32_32x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x64_64x64x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x64_32x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x16_64x64x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x16_32x32x16_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x32_64x64x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x32_32x32x32_16x8x8) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_tn, tf32_round_128x128x32_64x64x32_16x8x8) { + + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = float; + using ElementC = float; + + using LayoutA = 
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::TransformTestbed >() + .run(); +} + +TEST(SM80_warp_gemm_tensor_op_nt, tf32_round_128x128x32_64x64x32_16x8x8) { + + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = float; + using ElementC = float; + + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::TransformTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_16x16x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x16x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = int8_t; + using 
ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x64x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_16x16x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, 
Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x16x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 
64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x64x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x64x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using 
MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x16x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_16x16x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<16, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_64x64x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; 
+ using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_64x32x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_32x32x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_32x16x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_16x16x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = int8_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x64x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = 
cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x32x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x32x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x16x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_16x16x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_64x64x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_64x32x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_32x32x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = 
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_32x16x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 16, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_16x16x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<16, 16, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, 
Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x64x512_16x8x256) { + using Shape = cutlass::gemm::GemmShape<64, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x32x512_16x8x256) { + using Shape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x32x512_16x8x256) { + using Shape = 
cutlass::gemm::GemmShape<32, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x16x512_16x8x256) { + using Shape = cutlass::gemm::GemmShape<32, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_16x16x512_16x8x256) { + using Shape = cutlass::gemm::GemmShape<16, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + using LayoutB = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 512>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_64x64x1024_16x8x256) { + using Shape = cutlass::gemm::GemmShape<64, 64, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_64x32x1024_16x8x256) { + using Shape = cutlass::gemm::GemmShape<64, 32, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_32x32x1024_16x8x256) { + using Shape = cutlass::gemm::GemmShape<32, 32, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_32x16x1024_16x8x256) { + using Shape = cutlass::gemm::GemmShape<32, 16, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_16x16x1024_16x8x256) { + using Shape = cutlass::gemm::GemmShape<16, 16, 1024>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 
256>; + using Element = cutlass::uint1b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 1024>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST(SM80_warp_gemm_tensor_op_congruous_f64, 16x16x4_16x16x4_8x8x4) { + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x16x4_32x16x4_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + 
test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x32x4_32x32x4_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x64x4_32x64x4_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 64, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 16x16x16_16x16x16_8x8x4) { + using Shape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = 
cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 32x32x16_32x32x16_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 64x32x16_64x32x16_8x8x4) { + using Shape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 32x64x16_32x64x16_8x8x4) { + using Shape = 
cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_16x16x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<16, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_32x16x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = 
typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_32x32x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_64x32x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + 
+TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_64x64x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = cutlass::int4b_t; + using ElementC = int; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_canonical_f64_row_col, 32x32x8_64x32x8_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_canonical_f64_col_row, 32x32x8_64x32x8_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + 
cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_canonical_tf32_row_col, 32x32x8_64x32x8_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_tensor_op_canonical_tf32_col_row, 32x32x8_64x32x8_8x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 8>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm90.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm90.cu new file mode 100644 index 0000000000000000000000000000000000000000..6c2cc78bbc583cbfc755d8cab7e5f6c9757d467a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sm90.cu @@ -0,0 
+1,204 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + + \brief Unit tests for thread-level GEMM with Hopper FP64 +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) +TEST(SM90_warp_gemm_tensor_op_congruous_f64, 16x16x4_16x16x4_16x8x4) { + using Shape = cutlass::gemm::GemmShape<16, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_congruous_f64, 32x16x4_32x16x4_16x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 16, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_congruous_f64, 32x32x4_32x32x4_16x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_congruous_f64, 32x64x4_32x64x4_16x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 64, 4>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_crosswise_f64, 16x16x16_16x16x16_16x8x4) { + using Shape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = 
typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_crosswise_f64, 32x32x16_32x32x16_16x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_crosswise_f64, 64x32x16_64x32x16_16x8x4) { + using Shape = cutlass::gemm::GemmShape<64, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM90_warp_gemm_tensor_op_crosswise_f64, 32x64x16_32x64x16_16x8x4) { + using Shape = cutlass::gemm::GemmShape<32, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 
8, 4>; + using Element = double; + using ElementC = double; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type; + + test::gemm::warp::Testbed >() + .run(); +} +//////////////////////////////////////////////////////////////////////////////// +#endif // if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sparse_sm80.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sparse_sm80.cu new file mode 100644 index 0000000000000000000000000000000000000000..af87ee7a14dd07428c58e6336e66c41712efe00f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/gemm_sparse_sm80.cu @@ -0,0 +1,1107 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_sparse_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 128x128x64_64x64x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 128x128x64_64x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = 
typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 128x128x64_32x64x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 128x128x64_32x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 
128x128x64_32x16x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 128x64x128_64x32x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 64x128x128_32x64x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 64x64x128_32x32x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_f16, 64x32x128_32x16x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 128x128x64_64x64x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 128x128x64_64x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 128x128x64_32x64x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 128x128x64_32x32x64_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 128x64x128_64x32x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + 
cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 64x128x128_32x64x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_f16, 64x64x128_32x32x128_16x8x32) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; + using Element = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 128x128x128_64x64x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 
8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 128x128x128_64x32x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 128x128x128_32x64x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, 
InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 128x128x128_32x32x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 32, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 128x128x128_32x16x128_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 16, 128>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 64>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 128x64x256_64x32x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<64, 32, 256>; + 
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 64x128x256_32x64x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 64x64x256_32x32x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s8, 64x32x256_32x16x256_16x8x64) { + using Shape = cutlass::gemm::GemmShape<32, 16, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; + using Element = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 128x128x32_64x64x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 
128x128x32_64x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 128x128x32_32x64x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 128x128x32_32x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 128x128x32_32x16x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 16>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 128x64x256_64x32x256_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 64x128x64_32x64x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 64x64x64_32x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_tf32, 64x32x64_32x16x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 16, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using 
LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 128x128x32_64x64x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 128x128x32_64x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, 
ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 128x128x32_32x64x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 128x128x32_32x32x32_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 128x64x64_64x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<64, 32, 64>; + using InstructionShape = 
cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 64x128x64_32x64x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 64, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_congruous_tf32, 64x64x64_32x32x64_16x8x16) { + using Shape = cutlass::gemm::GemmShape<32, 32, 64>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; + using Element = cutlass::tfloat32_t; + using ElementC = float; + using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 32>; + + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 128x128x256_64x64x256_16x8x128) { + using Shape = cutlass::gemm::GemmShape<64, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 128x128x256_64x32x256_16x8x128) { + using Shape = cutlass::gemm::GemmShape<64, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 
128x128x256_32x64x256_16x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 64, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 128x128x256_32x32x256_16x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 32, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 128x128x256_32x16x256_16x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 16, 256>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 128>; + using LayoutB = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 128x64x512_64x32x512_16x8x128) { + using Shape = cutlass::gemm::GemmShape<64, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 64x128x512_32x64x512_16x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 64, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + 
+//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 64x64x512_32x32x512_16x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 32, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +TEST(SM80_warp_gemm_sparse_tensor_op_crosswise_s4, 64x32x512_32x16x512_16x8x128) { + using Shape = cutlass::gemm::GemmShape<32, 16, 512>; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; + using Element = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + cutlass::sizeof_bits::value, 256>; + + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, + cutlass::layout::RowMajor>::Type; + + test::gemm::warp::SparseTestbed >() + .run(); +} + +//////////////////////////////////////////////////////////////////////////////// + +#endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/testbed.h new file mode 
100644 index 0000000000000000000000000000000000000000..fe62ce4446024152314787cdeadca37ef5f27f1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/testbed.h @@ -0,0 +1,1543 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/numeric_types.h" +#include "cutlass/subbyte_reference.h" +#include "cutlass/platform/platform.h" +#include "cutlass/arch/arch.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/distribution.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/gemm_complex.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/host_reorder.h" +#include "cutlass/util/host_uncompress.h" + +namespace test { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test kernel +template +__global__ void kernel( + typename Mma::ElementC *output_C, + typename Mma::ElementA const *input_A, + typename Mma::ElementB const *input_B, + typename Mma::ElementC const *input_C, + int iterations = 1) { + + // Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers. 
+ __shared__ cutlass::AlignedBuffer< + typename Mma::ElementA, ThreadblockShape::kM * ThreadblockShape::kK> smem_buffer_A; + + __shared__ cutlass::AlignedBuffer< + typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B; + + if (threadIdx.x == 0) { + typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_A.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_A, i) = + cutlass::ReferenceFactory::type>::get(input_A, i); + } + + typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_B.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_B, i) = + cutlass::ReferenceFactory::type>::get(input_B, i); + } + } + + __syncthreads(); + + // + // Construct warp-level matrix product + // + + using FragmentA = typename Mma::FragmentA; + using FragmentB = typename Mma::FragmentB; + using FragmentC = typename Mma::FragmentC; + + typename Mma::LayoutA layout_A = Mma::LayoutA::packed({ThreadblockShape::kM, ThreadblockShape::kK}); + typename Mma::LayoutB layout_B = Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN}); + typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN}); + + typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId()); + + typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId()); + + FragmentA frag_A; + FragmentB frag_B; + + FragmentC accum; + + Mma mma; + + accum.clear(); + + CUTLASS_PRAGMA_NO_UNROLL + for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < ThreadblockShape::kK; + k += Mma::Policy::MmaShape::kK) { + iter_A.load(frag_A); + iter_B.load(frag_B); + + ++iter_A; + ++iter_B; + + mma(accum, frag_A, frag_B, accum); + } + } + + typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId()); + + 
iter_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Warp-level matrix multiply-accumulate + typename Mma_, + /// Size of threadblock-scoped shape used to store SMEM + typename ThreadblockShape_, + /// The inner product operation performed by GEMM + typename Operator_ = cutlass::arch::OpMultiplyAdd +> +struct Testbed { + + /// Thread-level matrix multiply-accumulate operator + using Mma = Mma_; + using ThreadblockShape = ThreadblockShape_; + using Operator = Operator_; + + using Shape = typename Mma::Shape; + using ElementA = typename Mma::ElementA; + using LayoutA = typename Mma::LayoutA; + using ElementB = typename Mma::ElementB; + using LayoutB = typename Mma::LayoutB; + using ElementC = typename Mma::ElementC; + using LayoutC = typename Mma::LayoutC; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed() { + + tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK)); + tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.major == 9) { + // NVIDIA Hopper drops support for several data types + if ( + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8) { + + return false; + } + } + + return true; + } + + + /// Runs the test + bool run( + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + + cutlass::reference::host::BlockFillRandomUniform(tensor_A.host_data(), + tensor_A.capacity(), seed, scope_max, scope_min, 0); + + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_A.host_data(), + tensor_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + + 
cutlass::reference::host::BlockFillRandomUniform(tensor_B.host_data(), + tensor_B.capacity(), seed, scope_max, scope_min, 0); + + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_B.host_data(), + tensor_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + + // launch kernel + kernel<<< dim3(1, 1), dim3(32, 1, 1) >>>( + tensor_D_computed.device_data(), + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data()); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) { + return false; + } + + tensor_D_computed.sync_host(); + + // + // Reference implementation + // + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + {Shape::kM, Shape::kN, ThreadblockShape::kK}, + ElementC(1), + tensor_A.host_ref(), + tensor_B.host_ref(), + ElementC(0), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed); + + if (!passed) { + + cutlass::TensorView tensor_A_physical( + tensor_A.host_data(), + tensor_A.stride()[0], + tensor_A.extent()); + + cutlass::TensorView tensor_B_physical( + tensor_B.host_data(), + tensor_B.stride()[0], + tensor_B.extent()); + + std::cout 
<<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "A:\n" << tensor_A.host_view() << "\n\n" + << "A(physical - stride: " << tensor_A.stride()[0] + << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n"; + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "B:\n" << tensor_B.host_view() << "\n\n" + << "B(physical - stride: " << tensor_B.stride()[0] + << ", extent: " << tensor_B.extent() << "):\n" << tensor_B_physical << "\n\n"; + + std::cout + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Warp-level matrix multiply-accumulate + typename Mma_, + /// Size of threadblock-scoped shape used to store SMEM + typename ThreadblockShape_ +> +struct TestbedComplex { + + /// Thread-level matrix multiply-accumulate operator + using Mma = Mma_; + using ThreadblockShape = ThreadblockShape_; + + using Shape = typename Mma::Shape; + using ElementA = typename Mma::ElementA; + using LayoutA = typename Mma::LayoutA; + using ElementB = typename Mma::ElementB; + using LayoutB = typename Mma::LayoutB; + using ElementC = typename Mma::ElementC; + using LayoutC = typename Mma::LayoutC; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + TestbedComplex() { + + tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK)); + tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); 
+ tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.major == 9) { + // NVIDIA Hopper drops support for several data types + if ( + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8) { + + return false; + } + } + + return true; + } + + /// Runs the test + bool run( + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform(tensor_A.host_view(), + seed, 8, -8, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_A.host_data(), + tensor_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform(tensor_B.host_view(), + seed + 16, 8, -8, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_B.host_data(), + tensor_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) 
{ + cutlass::reference::host::TensorFillIdentity(tensor_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + + // launch kernel + kernel<<< dim3(1, 1), dim3(32, 1, 1) >>>( + tensor_D_computed.device_data(), + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data()); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) { + return false; + } + + tensor_D_computed.sync_host(); + + // + // Reference implementation + // + + cutlass::reference::host::GemmComplex( + {Shape::kM, Shape::kN, ThreadblockShape::kK}, + ElementC(1), + tensor_A.host_ref(), + Mma::kTransformA, + tensor_B.host_ref(), + Mma::kTransformB, + ElementC(0), + tensor_C.host_ref(), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed); + + if (!passed) { + + cutlass::TensorView tensor_A_physical( + tensor_A.host_data(), + tensor_A.stride()[0], + tensor_A.extent()); + + cutlass::TensorView tensor_B_physical( + tensor_B.host_data(), + tensor_B.stride()[0], + tensor_B.extent()); + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "A:\n" << tensor_A.host_view() << "\n\n" + << "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n"; + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout 
+ << "B:\n" << tensor_B.host_view() << "\n\n" + << "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() <<"):\n" << tensor_B_physical << "\n\n"; + + std::cout + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test kernel +template +__global__ void kernel_transform( + typename Mma::ElementC *output_C, + typename Mma::ElementA const *input_A, + typename Mma::ElementB const *input_B, + typename Mma::ElementC const *input_C, + int iterations = 1) { + + // Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers. + __shared__ cutlass::AlignedBuffer< + typename Mma::ElementA, ThreadblockShape::kM * ThreadblockShape::kK> smem_buffer_A; + + __shared__ cutlass::AlignedBuffer< + typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B; + + if (threadIdx.x == 0) { + typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_A.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_A, i) = + cutlass::ReferenceFactory::type>::get(input_A, i); + } + + typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_B.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_B, i) = + cutlass::ReferenceFactory::type>::get(input_B, i); + } + } + + __syncthreads(); + + // + // Construct warp-level matrix product + // + + using FragmentA = typename Mma::FragmentA; + using FragmentB = typename Mma::FragmentB; + using FragmentC = typename Mma::FragmentC; + + using TransformedFragmentA = typename Mma::TransformedFragmentA; + using TransformedFragmentB = typename Mma::TransformedFragmentB; + + typename Mma::LayoutA layout_A = 
Mma::LayoutA::packed({ThreadblockShape::kM, ThreadblockShape::kK}); + typename Mma::LayoutB layout_B = Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN}); + typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN}); + + typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId()); + + typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId()); + + FragmentA loaded_frag_A; + FragmentB loaded_frag_B; + TransformedFragmentA transformed_frag_A; + TransformedFragmentB transformed_frag_B; + + FragmentC accum; + + Mma mma; + + accum.clear(); + + CUTLASS_PRAGMA_NO_UNROLL + for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < ThreadblockShape::kK; + k += Mma::Policy::MmaShape::kK) { + iter_A.load(loaded_frag_A); + iter_B.load(loaded_frag_B); + + ++iter_A; + ++iter_B; + + mma.transform(transformed_frag_A, transformed_frag_B, loaded_frag_A, + loaded_frag_B); + + mma(accum, transformed_frag_A, transformed_frag_B, accum); + } + } + + typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId()); + + iter_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Warp-level matrix multiply-accumulate + typename Mma_, + /// Size of threadblock-scoped shape used to store SMEM + typename ThreadblockShape_, + /// The innter product operation performed by GEMM + typename Operator_ = cutlass::arch::OpMultiplyAdd +> +struct TransformTestbed { + + /// Thread-level matrix multiply-accumulate operator + using Mma = Mma_; + using ThreadblockShape = ThreadblockShape_; + using Operator = Operator_; + + using Shape = typename Mma::Shape; + using ElementA = typename Mma::ElementA; + using LayoutA = typename Mma::LayoutA; + using ElementB = typename Mma::ElementB; + using 
LayoutB = typename Mma::LayoutB; + using ElementC = typename Mma::ElementC; + using LayoutC = typename Mma::LayoutC; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + TransformTestbed() { + + tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK)); + tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. + bool sufficient() const { + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.major == 9) { + // NVIDIA Hopper drops support for several data types + if ( + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8) { + + return false; + } + } + + return true; + } + + /// Runs the test + bool run( + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if 
(cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + tensor_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_A.host_data(), + tensor_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + tensor_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_B.host_data(), + tensor_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + + // launch kernel + kernel_transform<<>>( + tensor_D_computed.device_data(), tensor_A.device_data(), + tensor_B.device_data(), tensor_C.device_data()); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) { + return false; + } + + 
tensor_D_computed.sync_host(); + + // + // Reference implementation + // + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + {Shape::kM, Shape::kN, ThreadblockShape::kK}, + ElementC(1), + tensor_A.host_ref(), + tensor_B.host_ref(), + ElementC(0), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed); + + if (!passed) { + + cutlass::TensorView tensor_A_physical( + tensor_A.host_data(), + tensor_A.stride()[0], + tensor_A.extent()); + + cutlass::TensorView tensor_B_physical( + tensor_B.host_data(), + tensor_B.stride()[0], + tensor_B.extent()); + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "A:\n" << tensor_A.host_view() << "\n\n" + << "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n"; + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "B:\n" << tensor_B.host_view() << "\n\n" + << "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() << "):\n" << tensor_B_physical << "\n\n"; + + std::cout + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Warp-level matrix multiply-accumulate + typename Mma_, + /// Size of threadblock-scoped shape used to store SMEM + typename ThreadblockShape_ +> +struct TransformedTestbedComplex { + + /// Thread-level matrix multiply-accumulate operator + using Mma = Mma_; + using ThreadblockShape = ThreadblockShape_; + + using Shape = typename Mma::Shape; + 
using ElementA = typename Mma::ElementA; + using LayoutA = typename Mma::LayoutA; + using ElementB = typename Mma::ElementB; + using LayoutB = typename Mma::LayoutB; + using ElementC = typename Mma::ElementC; + using LayoutC = typename Mma::LayoutC; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + TransformedTestbedComplex() { + + tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK)); + tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.major == 9) { + // NVIDIA Hopper drops support for several data types + if ( + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8) { + + return false; + } + } + + return true; + } + + /// Runs the test + bool run( + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { + + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform(tensor_A.host_view(), + seed, 8, -8, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_A.host_data(), + tensor_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform(tensor_B.host_view(), + seed + 16, 8, -8, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_B.host_data(), + tensor_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + 
cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + + // launch kernel + kernel_transform<<< dim3(1, 1), dim3(32, 1, 1) >>>( + tensor_D_computed.device_data(), + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data()); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) { + return false; + } + + tensor_D_computed.sync_host(); + + // + // Reference implementation + // + + cutlass::reference::host::GemmComplex( + {Shape::kM, Shape::kN, ThreadblockShape::kK}, + ElementC(1), + tensor_A.host_ref(), + Mma::kTransformA, + tensor_B.host_ref(), + Mma::kTransformB, + ElementC(0), + tensor_C.host_ref(), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed); + + if (!passed) { + + cutlass::TensorView tensor_A_physical( + tensor_A.host_data(), + tensor_A.stride()[0], + tensor_A.extent()); + + cutlass::TensorView tensor_B_physical( + tensor_B.host_data(), + tensor_B.stride()[0], + tensor_B.extent()); + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "A:\n" << tensor_A.host_view() << "\n\n" + << "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n"; + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout + << "B:\n" << tensor_B.host_view() << "\n\n" + << "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() <<"):\n" << tensor_B_physical << "\n\n"; + + 
std::cout + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test kernel +template +__global__ void sparse_kernel( + typename Mma::ElementC *output_C, + typename Mma::ElementA const *input_A, + typename Mma::ElementB const *input_B, + typename Mma::ElementC const *input_C, + typename Mma::ElementE const *input_E, + int iterations = 1) { + + // Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers. + __shared__ cutlass::AlignedBuffer + smem_buffer_A; + + __shared__ cutlass::AlignedBuffer< + typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B; + + __shared__ cutlass::AlignedBuffer< + typename Mma::ElementE, Mma::Shape::kM * Mma::Shape::kK / + Mma::kSparse / Mma::kElementsPerElementE> + smem_buffer_E; + + __syncthreads(); + + if (threadIdx.x == 0) { + typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_A.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_A, i) = + cutlass::ReferenceFactory::type>::get(input_A, i); + } + + typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_B.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_B, i) = + cutlass::ReferenceFactory::type>::get(input_B, i); + } + + typename Mma::ElementE *smem_ptr_E = smem_buffer_E.data(); + #pragma unroll 1 + for (int i = 0; i < smem_buffer_E.size(); ++i) { + cutlass::ReferenceFactory::get(smem_ptr_E, i) = + cutlass::ReferenceFactory::type>::get(input_E, i); + } + } + + __syncthreads(); + + // + // Construct warp-level matrix product + // + + using FragmentA = typename Mma::FragmentA; + using FragmentB = typename Mma::FragmentB; + using FragmentC = 
typename Mma::FragmentC; + using FragmentE = typename Mma::FragmentE; + + typename Mma::LayoutA layout_A = Mma::LayoutA::packed( + {ThreadblockShape::kM, ThreadblockShape::kK / Mma::kSparse}); + typename Mma::LayoutB layout_B = + Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN}); + typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN}); + typename Mma::LayoutE layout_E = + Mma::LayoutE::packed({Mma::Shape::kM * Mma::kInterleaved, + Mma::Shape::kK / Mma::kSparse / + Mma::kElementsPerElementE / Mma::kInterleaved}); + + typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId()); + + typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId()); + + typename Mma::IteratorE iter_E({smem_buffer_E.data(), layout_E}, cutlass::arch::LaneId()); + + FragmentA frag_A; + FragmentB frag_B; + + FragmentC accum; + + FragmentE frag_E; + + Mma mma; + + accum.clear(); + + CUTLASS_PRAGMA_NO_UNROLL + for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < ThreadblockShape::kK; + k += Mma::Policy::MmaShape::kK) { + iter_A.load(frag_A); + iter_B.load(frag_B); + iter_E.load(frag_E); + + ++iter_A; + ++iter_B; + ++iter_E; + + mma(accum, frag_A, frag_B, accum, frag_E); + } + } + + typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId()); + + iter_C.store(accum); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product +template < + /// Warp-level matrix multiply-accumulate + typename Mma_, + /// Size of threadblock-scoped shape used to store SMEM + typename ThreadblockShape_, + /// The innter product operation performed by GEMM + typename Operator_ = cutlass::arch::OpMultiplyAdd +> +struct SparseTestbed { + + /// Thread-level matrix multiply-accumulate operator + using Mma = Mma_; + using 
ThreadblockShape = ThreadblockShape_; + using Operator = Operator_; + + using Shape = typename Mma::Shape; + using ElementA = typename Mma::ElementA; + using LayoutA = typename Mma::LayoutA; + using ElementB = typename Mma::ElementB; + using LayoutB = typename Mma::LayoutB; + using ElementC = typename Mma::ElementC; + using LayoutC = typename Mma::LayoutC; + + static int const Sparse = Mma::kSparse; + static int const MetaSizeInBits = Mma::kMetaSizeInBits; + static int const MaxID2 = Mma::kMaxID2; + static int const Interleaved = Mma::kInterleaved; + + using ElementE = typename Mma::ElementE; + + static int const ElementsPerElementE = Mma::kElementsPerElementE; + + using LayoutE = cutlass::layout::RowMajor; + using ReorderedLayoutE = + cutlass::layout::ColumnMajorInterleaved; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_A_uncompressed; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + cutlass::HostTensor tensor_E; + cutlass::HostTensor tensor_E_reordered; + + // + // Methods + // + + /// Allocates workspace in device memory + SparseTestbed() { + + tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, + ThreadblockShape::kK / Sparse)); + tensor_A_uncompressed.reset( + cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK)); + tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + tensor_E.reset(cutlass::make_Coord( + Shape::kM, Shape::kK / Sparse / ElementsPerElementE)); + tensor_E_reordered.reset(cutlass::make_Coord( + Shape::kM, Shape::kK / Sparse / ElementsPerElementE)); + } + + /// Returns true if the CUDA device is sufficient to execute the kernel. 
+ bool sufficient() const { + + cudaDeviceProp properties; + int device_idx; + cudaError_t result = cudaGetDevice(&device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() API call failed."); + } + + result = cudaGetDeviceProperties(&properties, device_idx); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + if (properties.major == 9) { + // NVIDIA Hopper drops support for several data types + if ( + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8 || + cutlass::sizeof_bits::value < 8) { + + return false; + } + } + + return true; + } + + /// Runs the test + bool run( + cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) { + + if (!sufficient()) { + return true; + } + + // + // initialize device memory + // + + if (init_A == cutlass::Distribution::Uniform) { + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomUniform( + tensor_A.host_view(), seed, scope_max, scope_min, 0); + } else if (init_A == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_A.host_data(), + tensor_A.capacity()); + } else if (init_A == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_A.host_view()); + } else { + return false; + } + + if (init_B == cutlass::Distribution::Uniform) { + int scope_max = 8; + int scope_min = -8; + + if (cutlass::sizeof_bits::value == 4) { + scope_max = 2; + scope_min = -2; + } else if (cutlass::sizeof_bits::value == 1) { + scope_max = 2; + scope_min = 0; + } + + uint64_t seed = 7; + 
cutlass::reference::host::TensorFillRandomUniform( + tensor_B.host_view(), seed + 16, scope_max, scope_min, 0); + } else if (init_B == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(tensor_B.host_data(), + tensor_B.capacity()); + } else if (init_B == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(tensor_B.host_view()); + } else { + return false; + } + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + if (init_E == cutlass::Distribution::Uniform) { + uint64_t seed = 7; + cutlass::reference::host::TensorFillRandomSparseMeta( + tensor_E.host_view(), seed, MetaSizeInBits); + } else if (init_E == cutlass::Distribution::Identity) { + uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444; + cutlass::reference::host::TensorFill(tensor_E.host_view(), + (ElementE)(content)); + } else { + return false; + } + + cutlass::reorder_meta( + tensor_E_reordered.host_ref(), tensor_E.host_ref(), + {Shape::kM, Shape::kN, Shape::kK / Sparse / ElementsPerElementE}); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + tensor_E_reordered.sync_device(); + + // launch kernel + sparse_kernel<<< dim3(1, 1), dim3(32, 1, 1) >>>( + tensor_D_computed.device_data(), + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data(), + tensor_E_reordered.device_data()); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) { + return false; + } + + tensor_D_computed.sync_host(); + + // + // Reference implementation + // + cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(), + 
tensor_E.host_ref(), Shape::kM, Shape::kK); + + cutlass::reference::host::Gemm + reference_gemm; + + reference_gemm( + {Shape::kM, Shape::kN, ThreadblockShape::kK}, + ElementC(1), + tensor_A_uncompressed.host_ref(), + tensor_B.host_ref(), + ElementC(0), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + EXPECT_TRUE(passed); + + if (!passed) { + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout << "A:\n" << tensor_A.host_view() << "\n\n"; + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout << "B:\n" << tensor_B.host_view() << "\n\n"; + + std::cout <<"cutlass::sizeof_bits::value = "<::value<<"\n"; + std::cout << "E:\n" << tensor_E.host_view() << "\n\n"; + + std::cout + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << "\n"; + } + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm70.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm70.cu new file mode 100644 index 0000000000000000000000000000000000000000..f2d6762983b79d76cca74576bf6c63235e2e0cf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm70.cu @@ -0,0 +1,688 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + + \brief Unit tests for warp-level wmma gemm +*/ +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED) + +#include "../../common/cutlass_unit_test.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +/// Test name format: SM[arch]_warp_wmma_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape] + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////// f16 accumulation point wmma.mma ////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +//////////////// [START] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [START] ////////////////////// + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// + +// 4 tests for {N,T}x{N,T}=>{T} +TEST(SM70_warp_wmma_row_col_row_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using 
WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_col_row_row_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_row_row_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = 
typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_col_col_row_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +// 4 tests for {N,T}x{N,T}=>{N} +TEST(SM70_warp_wmma_row_col_col_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + 
+//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_col_row_col_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_row_col_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + 
+//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_col_col_col_f16, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::ColumnMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} +/////////// [END] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [END] /////////////////////////// + + + +TEST(SM70_warp_wmma_row_col_row_f16, 64x64x16_64x64x16_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x64x32_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = 
cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + + test::gemm::warp::Testbed >().run(); +} + + +TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x32x32_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x64x32_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x32x32_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + 
using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_wmma_row_col_row_f16, 128x128x16_64x64x16_16x16x16) { + // Even though the test launches 128x128x16 CTA tile this test only verfies one warp + // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_col_row_f16, 32x8x16_32x8x16_32x8x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = 
cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_col_row_f16, 8x32x16_8x32x16_32x8x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +//////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.col.row.m8n32k16.f16.f16 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_col_row_row_f16, 8x32x16_8x32x16_8x32x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = 
cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_wmma_col_row_row_f16, 32x8x16_32x8x16_32x8x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = cutlass::half_t; + using LayoutA = cutlass::layout::ColumnMajor; + using LayoutB = cutlass::layout::RowMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////// f32 accumulation point wmma.mma ////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_col_row_f32, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC 
= float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_wmma_row_col_row_f32, 64x64x16_64x64x16_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM70_warp_wmma_row_col_row_f32, 64x64x32_64x64x32_16x16x16) { + + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +TEST(SM70_warp_wmma_row_col_row_f32, 128x128x16_64x64x16_16x16x16) { + // Even though the test launches 128x128x16 CTA tile this test only verfies one warp + // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA + using WarpShape = cutlass::gemm::GemmShape<64, 64, 
16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +///////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_col_row_f32, 32x8x16_32x8x16_32x8x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + + +///////////////////////////////////////////////////////////// +/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype +/// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 +//////////////////////////////////////////////////////////// +TEST(SM70_warp_wmma_row_col_row_f32, 8x32x16_8x32x16_8x32x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + using InstructionShape = 
cutlass::gemm::GemmShape<8, 32, 16>; + using ElementA = cutlass::half_t; + using ElementB = cutlass::half_t; + using ElementC = float; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, + LayoutA, + ElementB, LayoutB, + ElementC, + LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm72.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm72.cu new file mode 100644 index 0000000000000000000000000000000000000000..eab1536ffeca583ca61cdf098be10ab049f3d137 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm72.cu @@ -0,0 +1,185 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + + \brief Unit tests for thread-level GEMM +*/ +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED) + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////// Integer wmma.mma //////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// TODO: SM75 should be SM72, but the compilation breaks as SM72 shows up and runs on VOLTA +TEST(SM75_warp_wmma_row_col_s8, 
16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = int8_t; + using ElementB = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM75_warp_wmma_row_col_s8, 32x8x16_32x8x16_32x8x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + using ElementA = int8_t; + using ElementB = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM75_warp_wmma_row_col_s8, 8x32x16_8x32x16_8x32x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + using ElementA = int8_t; + using ElementB = int8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + 
ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM75_warp_wmma_row_col_u8, 16x16x16_16x16x16_16x16x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; + using ElementA = uint8_t; + using ElementB = uint8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM75_warp_wmma_row_col_u8, 32x8x16_32x8x16_32x8x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; + using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; + using ElementA = uint8_t; + using ElementB = uint8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} + +TEST(SM75_warp_wmma_row_col_u8, 8x32x16_8x32x16_8x32x16) { + // Threadblock and warp with just one native WMMA operation (most basic unit test) + using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; + using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; + using ElementA = uint8_t; + using ElementB = uint8_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + 
using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); +} +#endif //CUTLASS_ARCH_WMMA_SM72_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm75.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm75.cu new file mode 100644 index 0000000000000000000000000000000000000000..81a98d4c1dc86d8e07fd41620b8bd968fe8e9474 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/gemm/warp/wmma_sm75.cu @@ -0,0 +1,169 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + + \brief Unit tests for thread-level GEMM +*/ +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED) + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/half.h" + +#include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" + +#include "cutlass/core_io.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include "testbed.h" +/////////////////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////// SUBBYTE wmma.mma //////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////////////////// +TEST(SM75_warp_wmma_row_col_s4, 64x64x32_8x8x32_8x8x32) { + + using WarpShape = cutlass::gemm::GemmShape<8, 8, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using ElementA = cutlass::int4b_t; + using ElementB = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = 
cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); + +} + +TEST(SM75_warp_wmma_row_col_s4, 64x64x32_64x64x32_8x8x32) { + + using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using ElementA = cutlass::int4b_t; + using ElementB = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); + +} + +TEST(SM75_warp_wmma_row_col_s4, 64x64x64_8x8x64_8x8x32) { + + using WarpShape = cutlass::gemm::GemmShape<8, 8, 64>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; + using ElementA = cutlass::int4b_t; + using ElementB = cutlass::int4b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC>::Type; + + test::gemm::warp::Testbed >().run(); + +} + +TEST(SM75_warp_wmma_row_col_b1, 64x64x128_8x8x128_8x8x128) { + + using WarpShape = cutlass::gemm::GemmShape<8, 8, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using ElementA = cutlass::uint1b_t; + using ElementB = cutlass::uint1b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using 
WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, cutlass::arch::OpXorPopc>().run(); + +} + +TEST(SM75_warp_wmma_row_col_b1, 64x64x128_64x64x128_8x8x128) { + + using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; + using ElementA = cutlass::uint1b_t; + using ElementB = cutlass::uint1b_t; + using ElementC = int32_t; + using LayoutA = cutlass::layout::RowMajor; + using LayoutB = cutlass::layout::ColumnMajor; + using LayoutC = cutlass::layout::RowMajor; + + using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + cutlass::arch::OpXorPopc>::Type; + + test::gemm::warp::Testbed, cutlass::arch::OpXorPopc>().run(); + +} +#endif //CUTLASS_ARCH_WMMA_SM75_ENABLED diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..73ae5bc60bd2585f1c2b6bd677d679d3f774d71f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/CMakeLists.txt @@ -0,0 +1,34 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_layout + matrix.cu + tensor.cu + tensor_nhwc.cu + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/matrix.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/matrix.cu new file mode 100644 index 0000000000000000000000000000000000000000..c603cedd0c4d676f05a22d72307646c238aa2db9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/matrix.cu @@ -0,0 +1,151 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +**************************************************************************************************/ +/*! 
\file +\brief unit tests for matrix layout +*/ + +#include "../common/cutlass_unit_test.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/matrix_coord.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +namespace test { +namespace layout { + void test_row_major_layout(int row_size, int column_size, int ldm) { + cutlass::layout::RowMajor row_major(ldm); + + // test pointer offset + for (int row_idx = 0; row_idx < row_size; row_idx++) { + for (int column_idx = 0; column_idx < column_size; column_idx++) { + cutlass::MatrixCoord matrix_coord(row_idx, column_idx); + auto ptr_offset = row_major(matrix_coord); + decltype(ptr_offset) reference_offset = row_idx * ldm + column_idx; + EXPECT_EQ(ptr_offset, reference_offset); + } + } + + // test stride + EXPECT_EQ(row_major.stride()[0], ldm); + + // test capacity + auto capacity = row_major.capacity(cutlass::MatrixCoord(row_size, column_size)); + decltype(capacity) reference_capacity = row_size * ldm; + EXPECT_EQ(capacity, reference_capacity); + + // test packed + auto packed = row_major.packed(cutlass::MatrixCoord(row_size, column_size)); + // the packed matrix's stride is the same with column size + EXPECT_EQ(packed.stride()[0], column_size); + } + + void test_column_major_layout(int row_size, int column_size, int ldm) { + cutlass::layout::ColumnMajor column_major(ldm); + + // test pointer offset + for (int row_idx = 0; row_idx < row_size; row_idx++) { + for (int column_idx = 0; column_idx < column_size; column_idx++) { + cutlass::MatrixCoord matrix_coord(row_idx, column_idx); + auto ptr_offset = column_major(matrix_coord); + decltype(ptr_offset) reference_offset = row_idx + column_idx * ldm; + EXPECT_EQ(ptr_offset, reference_offset); + } + } + + // test stride + EXPECT_EQ(column_major.stride()[0], ldm); + + // test capacity + auto capacity = 
column_major.capacity(cutlass::MatrixCoord(row_size, column_size)); + decltype(capacity) reference_capacity = column_size * ldm; + EXPECT_EQ(capacity, reference_capacity); + + // test packed + auto packed = column_major.packed(cutlass::MatrixCoord(row_size, column_size)); + // the packed matrix's stride is the same with row size + EXPECT_EQ(packed.stride()[0], row_size); + } + +} // namespace layout +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Layout_Matrix, row_major_32_53) { + int const row_size = 32; + int const column_size = 53; + int const ldm = 55; + test::layout::test_row_major_layout(row_size, column_size, ldm); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Layout_Matrix, column_major_32_53) { + int const row_size = 32; + int const column_size = 53; + int const ldm = 55; + test::layout::test_column_major_layout(row_size, column_size, ldm); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Layout_Matrix, general_matrix) { + + int M = 16; + int N = 16; + int interleave = 4; + + cutlass::layout::GeneralMatrix::TensorCoord extent = {M, N}; + + cutlass::layout::GeneralMatrix layout = + cutlass::layout::GeneralMatrix::packed( + extent, cutlass::layout::Matrix::kColumnMajor, interleave); + + cutlass::HostTensor tensor(extent); + + for (int m = 0; m < M; ++m) { + for (int n = 0; n < N; ++n) { + tensor.host_data(m * N + n) = m * N + n; + } + } + + cutlass::TensorView canonical({tensor.host_data(), layout}, extent); + + // Uncomment this to view + // + //std::cout << canonical << std::endl; + // +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/tensor.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/tensor.cu new file mode 100644 index 0000000000000000000000000000000000000000..253f0c0a7b9aaac3c7f0e1e4390ef578edc01ab9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/tensor.cu @@ -0,0 +1,153 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +**************************************************************************************************/ +/*! \file +\brief unit tests for tensor layout +*/ + +#include "../common/cutlass_unit_test.h" + +#include "cutlass/layout/tensor.h" +#include "cutlass/tensor_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +namespace test { +namespace layout { + void test_NHWC_layout(int n_size, int h_size, int w_size, int c_size) { + int ldc = c_size + 1; + int ldw = ldc * (w_size + 2); + int ldh = ldw * (h_size + 3); + + cutlass::layout::TensorNHWC::Stride tensor_stride({ ldc, ldw, ldh }); + + cutlass::layout::TensorNHWC tensor_nhwc(tensor_stride); + + // test pointer offset + for (int n_idx = 0; n_idx < n_size; n_idx++) { + for (int h_idx = 0; h_idx < h_size; h_idx++) { + for (int w_idx = 0; w_idx < w_size; w_idx++) { + for (int c_idx = 0; c_idx < c_size; c_idx++) { + cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx); + auto ptr_offset = tensor_nhwc(tensor_coord); + decltype(ptr_offset) reference_offset = c_idx + + w_idx * ldc + + h_idx * ldw + + n_idx * ldh; + EXPECT_EQ(ptr_offset, reference_offset); + } + } + } + } + + // test stride + auto stride = tensor_nhwc.stride(); + EXPECT_EQ(stride, tensor_stride); + + // test capacity + auto capacity = tensor_nhwc.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); + decltype(capacity) referece_capacity = ldh * 
n_size; + EXPECT_EQ(capacity, referece_capacity); + + // test packed + auto packed_tensor_layout = tensor_nhwc.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); + auto packed_stride = packed_tensor_layout.stride(); + EXPECT_EQ(packed_stride, cutlass::layout::TensorNHWC::Stride({ c_size, w_size * c_size, h_size * w_size * c_size })); + } + + + void test_NCHW_layout(int n_size, int c_size, int h_size, int w_size) { + int ldw = w_size + 1; + int ldh = ldw * (h_size + 2); + int ldc = ldh * (c_size + 1); + + cutlass::layout::TensorNCHW::Stride tensor_stride({ ldw, ldh, ldc }); + + cutlass::layout::TensorNCHW tensor_nchw(tensor_stride); + + // test pointer offset + for (int n_idx = 0; n_idx < n_size; n_idx++) { + for (int c_idx = 0; c_idx < c_size; c_idx++) { + for (int h_idx = 0; h_idx < w_size; h_idx++) { + for (int w_idx = 0; w_idx < c_size; w_idx++) { + // tensor4DCoord is always created in nhwc order + cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx); + auto ptr_offset = tensor_nchw(tensor_coord); + decltype(ptr_offset) reference_offset = w_idx + + h_idx * ldw + + c_idx * ldh + + n_idx * ldc; + EXPECT_EQ(ptr_offset, reference_offset); + } + } + } + } + + // test stride + auto stride = tensor_nchw.stride(); + EXPECT_EQ(stride, tensor_stride); + + // test capacity + auto capacity = tensor_nchw.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); + decltype(capacity) referece_capacity = ldc * n_size; + EXPECT_EQ(capacity, referece_capacity); + + // test packed + auto packed_tensor_layout = tensor_nchw.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); + auto packed_stride = packed_tensor_layout.stride(); + EXPECT_EQ(packed_stride, cutlass::layout::TensorNHWC::Stride({ w_size, w_size * h_size, w_size * h_size * c_size })); + } +} // namespace layout +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Layout_Tensor, NHWC_32_12_10_14) { + 
int n_size = 32; + int h_size = 12; + int w_size = 10; + int c_size = 14; + test::layout::test_NHWC_layout(n_size, h_size, w_size, c_size); + +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Layout_Tensor, NCHW_32_12_10_14) { + int n_size = 32; + int c_size = 12; + int h_size = 10; + int w_size = 14; + test::layout::test_NCHW_layout(n_size, c_size, h_size, w_size); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/tensor_nhwc.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/tensor_nhwc.cu new file mode 100644 index 0000000000000000000000000000000000000000..e0f6b5ba0f7d82437cd46f0c57a87ec8bff0dd89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/layout/tensor_nhwc.cu @@ -0,0 +1,214 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +**************************************************************************************************/ +/*! \file +\brief unit tests for NHWC tensor layout +*/ + +#include "../common/cutlass_unit_test.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/util/device_memory.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +namespace test { +namespace layout { + + void test_nhwc_layout(int n_size, int h_size, int w_size, int c_size) { + int ldc = c_size + 1; + int ldw = ldc * (w_size + 2); + int ldh = ldw * (h_size + 3); + + typedef cutlass::layout::TensorNHWC Tensor; + + Tensor::Stride tensor_stride({ ldc, ldw, ldh }); + Tensor tensor_nhw_packed_c(tensor_stride); + + // test pointer offset + for (int n_idx = 0; n_idx < n_size; n_idx++) { + for (int p_idx = 0; p_idx < h_size; p_idx++) { + for (int q_idx = 0; q_idx < w_size; q_idx++) { + for (int c_idx = 0; c_idx < c_size; c_idx++) { + cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, c_idx); + auto ptr_offset = tensor_nhw_packed_c(tensor_coord); + decltype(ptr_offset) reference_offset = c_idx + + q_idx * ldc + + p_idx * ldw + + n_idx * ldh; + EXPECT_EQ(ptr_offset, 
reference_offset); + } + } + } + } + + // test stride + auto stride = tensor_nhw_packed_c.stride(); + EXPECT_EQ(stride, tensor_stride); + + // test capacity + auto capacity = tensor_nhw_packed_c.capacity( + cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); + decltype(capacity) referece_capacity = ldh * n_size; + EXPECT_EQ(capacity, referece_capacity); + + } + + __global__ void test_nhwc_inverse( + int *output, int n_size, int h_size, int w_size, int c_size) { + int ldc = c_size; + int ldw = ldc * w_size; + int ldh = ldw * h_size; + + typedef cutlass::layout::TensorNHWC Tensor; + + Tensor::Stride tensor_stride({ ldc, ldw, ldh }); + Tensor tensor_nhw_packed_c(tensor_stride); + + for (int n_idx = 0; n_idx < n_size; n_idx++) { + for (int p_idx = 0; p_idx < h_size; p_idx++) { + for (int q_idx = 0; q_idx < w_size; q_idx++) { + cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, threadIdx.x); + int ptr_offset = tensor_nhw_packed_c(tensor_coord); + cutlass::Tensor4DCoord inv_coord = tensor_nhw_packed_c.inverse(ptr_offset); + output[ptr_offset] = tensor_nhw_packed_c(inv_coord); + } + } + } + } + + class TestTensorNHWC { + public: + + // + // Data members + // + + // + // Methods + // + + /// Ctor + TestTensorNHWC() { + + } + + /// Runs the test + void run(int n_size, int h_size, int w_size, int c_size) { + + size_t size = n_size * h_size * w_size * c_size; + + /// Device memory containing output + cutlass::device_memory::allocation< int > output(size); + int *output_host = (int *)malloc(sizeof(int) * size); + + dim3 grid(1,1); + dim3 block(c_size, 1, 1); + + test::layout::test_nhwc_inverse<<< grid, block >>>(output.get(), + n_size, h_size, w_size, c_size); + + cudaError_t result = cudaDeviceSynchronize(); + ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); + + // + // Verify output + // + + cutlass::device_memory::copy_to_host(output_host, output.get(), size); + + result = cudaGetLastError(); + ASSERT_EQ(result, cudaSuccess) << 
"CUDA error: " << cudaGetErrorString(result); + + for (int n_idx = 0; n_idx < n_size; n_idx++) { + for (int p_idx = 0; p_idx < h_size; p_idx++) { + for (int q_idx = 0; q_idx < w_size; q_idx++) { + for (int c_idx = 0; c_idx < c_size; c_idx++) { + int reference_offset = c_idx + + q_idx * c_size + + p_idx * (c_size * w_size) + + n_idx * (c_size * w_size * h_size); + EXPECT_EQ(output_host[reference_offset], reference_offset); + } + } + } + } + } +}; + + +} // namespace layout +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Layout_TensorNHWC, NHWC_1_16_8_32) { + int n_size = 1; + int h_size = 16; + int w_size = 8; + int c_size = 32; + test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); + test::layout::TestTensorNHWC test_nhwc; + test_nhwc.run(n_size, h_size, w_size, c_size); + +} + +TEST(Layout_TensorNHWC, NHWC_2_16_8_32) { + int n_size = 2; + int h_size = 16; + int w_size = 8; + int c_size = 32; + test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); + test::layout::TestTensorNHWC test_nhwc; + test_nhwc.run(n_size, h_size, w_size, c_size); +} + +TEST(Layout_TensorNHWC, NHWC_2_16_8_128) { + int n_size = 2; + int h_size = 16; + int w_size = 8; + int c_size = 128; + test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); + test::layout::TestTensorNHWC test_nhwc; + test_nhwc.run(n_size, h_size, w_size, c_size); + +} + +TEST(Layout_TensorNHWC, NHWC_4_8_16_128) { + int n_size = 4; + int h_size = 8; + int w_size = 16; + int c_size = 128; + test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); + test::layout::TestTensorNHWC test_nhwc; + test_nhwc.run(n_size, h_size, w_size, c_size); + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/CMakeLists.txt 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..c76581f9c007b6630feecfba668ad1bf501a1f07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/CMakeLists.txt @@ -0,0 +1,114 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +set(CUTLASS_NVRTC_HAS_CUDA_FP16 FALSE) + +# CUTLASS NVRTC target +macro(add_nvrtc_headers BASE_DIR FILES) + foreach(CUTLASS_FILE ${FILES}) + set(OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/nvrtc/${CUTLASS_FILE}") + + string(REPLACE "/" "_" VARIABLE_NAME ${CUTLASS_FILE}) + string(REPLACE "." "_" VARIABLE_NAME ${VARIABLE_NAME}) + + add_custom_command(OUTPUT ${OUTPUT_FILE} + COMMAND ${CMAKE_COMMAND} + -DFILE_IN="${BASE_DIR}/${CUTLASS_FILE}" + -DFILE_OUT="${OUTPUT_FILE}" + -DVARIABLE_NAME="${VARIABLE_NAME}" + -P ${PROJECT_SOURCE_DIR}/bin2hex.cmake + DEPENDS ${BASE_DIR}/${CUTLASS_FILE} + ) + + list(APPEND GENERATED_HEADER_FILES "${OUTPUT_FILE}") + + string(APPEND NVRTC_INCLUDES_HEADERS "#include <${OUTPUT_FILE}>\n") + string(APPEND NVRTC_INCLUDES_STRINGS " ${VARIABLE_NAME},\n") + string(APPEND NVRTC_INCLUDES_NAMES " \"${CUTLASS_FILE}\",\n") + endforeach() +endmacro() + +string(APPEND NVRTC_INCLUDES_STRINGS "char const *kCutlassHeaders[] = {\n") +string(APPEND NVRTC_INCLUDES_NAMES "char const *kCutlassHeaderNames[] = {\n") + + +file(GLOB_RECURSE NVRTC_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} kernel/thread/*.hpp) + +add_nvrtc_headers(${PROJECT_SOURCE_DIR}/include "${CUTLASS_CUTLASS};${CUTLASS_UTIL};${CUTLASS_DEVICE}") +add_nvrtc_headers(${PROJECT_SOURCE_DIR}/include "${CUTLASS_CUTE}") +add_nvrtc_headers(${PROJECT_SOURCE_DIR}/test "${CUTLASS_NVRTC};${CUTLASS_UTIL};${CUTLASS_DEVICE}") +add_nvrtc_headers(${CMAKE_CURRENT_SOURCE_DIR} "${NVRTC_SOURCES}") + +add_nvrtc_headers("${CMAKE_CURRENT_SOURCE_DIR}/stdlib" "assert.h;stdint.h") +if(CUTLASS_NVRTC_HAS_CUDA_FP16) + add_nvrtc_headers("${CMAKE_CURRENT_SOURCE_DIR}/stdlib" "cuda_fp16.h;cuda_fp16.hpp") +endif() + +string(APPEND NVRTC_INCLUDES_STRINGS "};\n") +string(APPEND NVRTC_INCLUDES_NAMES "};\n") + +string(APPEND NVRTC_INCLUDES_STRINGS "const size_t kCutlassHeaderCount = sizeof(kCutlassHeaders) / sizeof(*kCutlassHeaders);\n") + +file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/cutlass/nvrtc/environment.cpp" + "#include \n" + 
"${NVRTC_INCLUDES_HEADERS}" + "\n" + "namespace cutlass {\n" + "namespace nvrtc {\n" + "\n" + "${NVRTC_INCLUDES_STRINGS}" + "\n" + "${NVRTC_INCLUDES_NAMES}" + "\n" + "} // namespace nvrtc\n" + "} // namespace cutlass\n" +) + +set(GENERATED_SOURCE_FILES "${CMAKE_CURRENT_BINARY_DIR}/cutlass/nvrtc/environment.cpp") + +source_group("Generated\\Header Files" FILES ${GENERATED_HEADER_FILES}) +source_group("Generated\\Source Files" FILES ${GENERATED_SOURCE_FILES}) + +cutlass_add_library(cutlass_nvrtc STATIC + cutlass/nvrtc/environment.h + ${GENERATED_SOURCE_FILES} + ${GENERATED_HEADER_FILES} + ) + +target_include_directories( + cutlass_nvrtc + PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR} + ) + +target_link_libraries(cutlass_nvrtc PUBLIC nvidia::nvrtc nvidia::cuda_driver) + +add_subdirectory(thread) + +add_custom_target(cutlass_test_unit_nvrtc DEPENDS cutlass_test_unit_nvrtc_thread) +add_custom_target(test_unit_nvrtc DEPENDS test_unit_nvrtc_thread) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/cutlass/nvrtc/environment.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/cutlass/nvrtc/environment.h new file mode 100644 index 0000000000000000000000000000000000000000..94f3c78251069974a7430a9dbe776ba3c2c86b3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/cutlass/nvrtc/environment.h @@ -0,0 +1,43 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include +#include "cutlass/cutlass.h" + +namespace cutlass { +namespace nvrtc { + +extern char const *kCutlassHeaders[]; +extern char const *kCutlassHeaderNames[]; +extern size_t const kCutlassHeaderCount; +} // namespace nvrtc +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/kernel/thread/contraction.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/kernel/thread/contraction.hpp new file mode 100644 index 0000000000000000000000000000000000000000..65c4437a2e7c4a774fe08870dde12fd0fb24bec4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/kernel/thread/contraction.hpp @@ -0,0 +1,127 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#include "cute/tensor.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" + + +namespace nvrtc { +namespace thread { + +template< + typename ElementA, typename ElementB, typename ElementC, + typename TileShape, typename ClusterShape, + bool kTransA, bool kTransB, + int RANK_M, int RANK_N, int RANK_K, int RANK_L +> +struct ContractionKernel { + +using ElementScalar = float; +using ElementAccum = float; +using EpilogueThread = cutlass::epilogue::thread::LinearCombination; + +static constexpr cute::GMMA::Major majorA = ! kTransA ? cute::GMMA::Major::MN : cute::GMMA::Major::K; +static constexpr cute::GMMA::Major majorB = ! kTransB ? 
cute::GMMA::Major::K : cute::GMMA::Major::MN; + +/// Kernel config +typedef int64_t stride_type; +typedef int32_t extent_type; + +static constexpr const stride_type* stride_null = nullptr; +static constexpr const extent_type* extent_null = nullptr; + +template +static constexpr +auto +make_stride_tuple(Indexable const& t, int n, int64_t init_default = 0) { + static_assert(Rank > 1); + if constexpr (IsMajor) { + return cute::transform(cute::make_seq{}, [&](auto i) { + if constexpr (i == 0) { + return cute::Int<1>{}; + } + else { + return i < n ? t[i] : init_default; + } + }); + } + else { + return cute::make_int_tuple(t, n, init_default); + } +} + +using StrideA = decltype(cute::make_stride( + make_stride_tuple(stride_null, 0, 0), + make_stride_tuple(stride_null, 0, 0), + cute::make_int_tuple(stride_null, 0, 0))); + +using StrideB = decltype(cute::make_stride( + make_stride_tuple(stride_null, 0, 0), + make_stride_tuple(stride_null, 0, 0), + cute::make_int_tuple(stride_null, 0, 0))); + +using StrideC = decltype(cute::make_stride( + cute::make_int_tuple(stride_null, 0, 0), + cute::make_int_tuple(stride_null, 0, 0), + cute::make_int_tuple(stride_null, 0, 0))); + +using ProblemShape = decltype(cute::make_shape( + cute::make_int_tuple(extent_null, 0, 0), + cute::make_int_tuple(extent_null, 0, 0), + cute::make_int_tuple(extent_null, 0, 0), + cute::make_int_tuple(extent_null, 0, 0))); + +using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< + cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, + ElementA, StrideA, 16 / sizeof(ElementA), + ElementB, StrideB, 16 / sizeof(ElementB), + ElementAccum, + TileShape, ClusterShape, cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::KernelTmaWarpSpecialized +>::CollectiveOp; + +using EpilogueOutputOp = cutlass::epilogue::collective::DefaultEpilogue; +using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter; +using Kernel = cutlass::gemm::kernel::GemmUniversal< + 
ProblemShape, + CollectiveOp, + CollectiveEpilogue>; + +}; + +} // namespace nvrtc +} // namespace thread diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/kernel/thread/testbed_kernel.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/kernel/thread/testbed_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..c2d9cdef3dd0c4b4b9eb43ae148f2ad80a590960 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/kernel/thread/testbed_kernel.h @@ -0,0 +1,76 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#pragma once + +#include "cutlass/array.h" + +namespace test { +namespace nvrtc { +namespace kernel { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Thread-level matrix multiply-accumulate +template +__global__ void testbed_kernel( + typename Mma::ElementC *D, + typename Mma::ElementA const *A, + typename Mma::ElementB const *B, + typename Mma::ElementC const *C) { + + auto ptr_D = reinterpret_cast *>(D); + auto ptr_A = reinterpret_cast const *>(A); + auto ptr_B = reinterpret_cast const *>(B); + auto ptr_C = reinterpret_cast const *>(C); + + Mma mma; + + auto a = *ptr_A; + auto b = *ptr_B; + auto c = *ptr_C; + + cutlass::Array d; + + mma(d, a, b, c); + + *ptr_D = d; +} + +} +} +} +} + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/stdlib/assert.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/stdlib/assert.h new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/stdlib/stdint.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/stdlib/stdint.h new file mode 
100644 index 0000000000000000000000000000000000000000..f6033de9a78dfa728e9e7511b0154385d0669a33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/stdlib/stdint.h @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +typedef char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; +typedef unsigned short uint16_t; +typedef int int32_t; +typedef unsigned int uint32_t; +typedef long long int int64_t; +typedef unsigned long long int uint64_t; + +#if defined __x86_64__ && !defined __ILP32__ +# define __WORDSIZE 64 +#else +# define __WORDSIZE 32 +#endif + + +/* Small types. */ + +/* Signed. */ +typedef signed char int_least8_t; +typedef short int int_least16_t; +typedef int int_least32_t; +#if __WORDSIZE == 64 +typedef long int int_least64_t; +#else +__extension__ +typedef long long int int_least64_t; +#endif + +/* Unsigned. */ +typedef unsigned char uint_least8_t; +typedef unsigned short int uint_least16_t; +typedef unsigned int uint_least32_t; +#if __WORDSIZE == 64 +typedef unsigned long int uint_least64_t; +#else +__extension__ +typedef unsigned long long int uint_least64_t; +#endif + + +/* Fast types. */ + +/* Signed. */ +typedef signed char int_fast8_t; +#if __WORDSIZE == 64 +typedef long int int_fast16_t; +typedef long int int_fast32_t; +typedef long int int_fast64_t; +#else +typedef int int_fast16_t; +typedef int int_fast32_t; +__extension__ +typedef long long int int_fast64_t; +#endif + +/* Unsigned. */ +typedef unsigned char uint_fast8_t; +#if __WORDSIZE == 64 +typedef unsigned long int uint_fast16_t; +typedef unsigned long int uint_fast32_t; +typedef unsigned long int uint_fast64_t; +#else +typedef unsigned int uint_fast16_t; +typedef unsigned int uint_fast32_t; +__extension__ +typedef unsigned long long int uint_fast64_t; +#endif + +/* Types for `void *' pointers. 
*/ +#if __WORDSIZE == 64 +# ifndef __intptr_t_defined +typedef long int intptr_t; +# define __intptr_t_defined +# endif +typedef unsigned long int uintptr_t; +#else +# ifndef __intptr_t_defined +typedef int intptr_t; +# define __intptr_t_defined +# endif +typedef unsigned int uintptr_t; +#endif + + +/* Largest integral types. */ +#if __WORDSIZE == 64 +typedef long int intmax_t; +typedef unsigned long int uintmax_t; +#else +__extension__ +typedef long long int intmax_t; +__extension__ +typedef unsigned long long int uintmax_t; +#endif + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/.gitignore b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9484314ac8af17dd2cff794abbf378814337c776 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/.gitignore @@ -0,0 +1 @@ +nvrtc_config.hpp diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e164604374b568b05227b1d6b6f1d7f718955492 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/CMakeLists.txt @@ -0,0 +1,40 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +configure_file(nvrtc_config.in nvrtc_config.hpp) + +cutlass_test_unit_add_executable( + cutlass_test_unit_nvrtc_thread + nvrtc_gemm.cu + nvrtc_contraction.cu + testbed.h +) + +target_link_libraries(cutlass_test_unit_nvrtc_thread PRIVATE cutlass_nvrtc) + +target_include_directories(cutlass_test_unit_nvrtc_thread PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_config.in b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_config.in new file mode 100644 index 0000000000000000000000000000000000000000..6291b93bd5b3681a7d6777d66d4a1767af5e94ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_config.in @@ -0,0 +1,3 @@ +#pragma once + +#define CUDA_INCLUDE_DIR "@CUDA_TOOLKIT_ROOT_DIR@/include" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_contraction.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_contraction.cu new file mode 100644 index 0000000000000000000000000000000000000000..934523b53251d4b8d85cb2e4d085473985e62edb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_contraction.cu @@ -0,0 +1,66 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for GETT +*/ + +#include +#include + +#include "testbed.h" + +#include "nvrtc_config.hpp" + +#ifndef CUDA_INCLUDE_DIR +static_assert(0, "CUDA include path is not defined"); +#endif + +TEST(SM90_nvrtc_kernel, Contraction) { + static const char* nvrtc_opts[] = { + "-w", + "-default-device", + "-std=c++17", + "-arch=sm_90", + "-I" CUDA_INCLUDE_DIR, + }; + + EXPECT_TRUE(test::nvrtc::thread::TestbedKernel::compile( + "nvrtc::thread::ContractionKernel<" + "cutlass::bfloat16_t, cutlass::bfloat16_t, cutlass::bfloat16_t," + "cute::Shape, cute::Shape, cute::Shape>," + "cute::Shape," + "true, true," + "10, 10, 10, 10>::Kernel", + { nvrtc_opts, nvrtc_opts + 5 } + )); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_gemm.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_gemm.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b9b8bb38a7f38d82980af0782925b7cf880b9a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/nvrtc_gemm.cu @@ -0,0 +1,203 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level GEMM +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/gemm/thread/mma.h" + +#include "testbed.h" + +#if 0 +int main() { + nvrtc::thread::Testbed< + cutlass::gemm::GemmShape<3, 4, 2>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run("cutlass::gemm::thread::Mma, float, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::ColumnMajor >"); + return 0; +} +#endif + +TEST(SM50_Sgemm_thread_nvrtc, DISABLED_col_row_3x4x2) { + + test::nvrtc::thread::Testbed< + cutlass::gemm::GemmShape<3, 4, 2>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run("cutlass::gemm::thread::Mma, float, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::ColumnMajor >"); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#if 0 +TEST(SM50_Sgemm_thread, col_row_3x4x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<3, 4, 2>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_row_4x4x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<4, 4, 2>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, row_col_4x4x2) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<4, 4, 2>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_row_4x5x3) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<4, 5, 3>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, 
+ cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_row) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, row_col) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, col_col) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Sgemm_thread, row_row) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::RowMajor, + float, + cutlass::layout::ColumnMajor + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM50_Dgemm_thread, col_row) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor + >().run(); +} + +TEST(SM50_Dgemm_thread, row_col) { + + test::gemm::thread::Testbed< + cutlass::gemm::GemmShape<8, 8, 1>, + double, + cutlass::layout::RowMajor, + double, + cutlass::layout::ColumnMajor, + double, + cutlass::layout::ColumnMajor + >().run(); +} +#endif +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/testbed.h new file mode 100644 index 
0000000000000000000000000000000000000000..0b8d3bd763e1086205c4e561295d429ffca2c795 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/nvrtc/thread/testbed.h @@ -0,0 +1,398 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Unit tests for thread-level GEMM +*/ + +#pragma once + +#include +#include +#include + +#include "cutlass/gemm/thread/mma.h" +#include "../kernel/thread/testbed_kernel.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/trace.h" + +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/gemm.h" + +#include +#include +#include "../cutlass/nvrtc/environment.h" +#include + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace nvrtc { +namespace thread { + +#define NVRTC_RETURN_IF_ERROR(api) \ + do { \ + nvrtcResult _result = api; \ + if (_result != NVRTC_SUCCESS) { \ + CUTLASS_TRACE_HOST("Nvrtc error: " << _result); \ + return false; \ + } \ + } while(0) + +inline const char * cuda_source_fmt = R"""( + +#include "kernel/thread/contraction.hpp" + +using Operator = %s; + +extern "C" __global__ void global_entry(__grid_constant__ Operator::Params const params) { + extern __shared__ char smem[]; + + Operator op; + op(params, smem); +} + +)"""; + +struct TestbedKernel { + static bool compile(std::string const &kernel, std::vector const &opts) { + int sz = std::snprintf(nullptr, 0, cuda_source_fmt, kernel.c_str()); + std::vector cuda_source(sz + 1); + std::snprintf(&cuda_source[0], cuda_source.size(), cuda_source_fmt, kernel.c_str()); + + nvrtcProgram program; + NVRTC_RETURN_IF_ERROR( + nvrtcCreateProgram( + &program, + cuda_source.data(), + nullptr, + static_cast(cutlass::nvrtc::kCutlassHeaderCount), + cutlass::nvrtc::kCutlassHeaders, + cutlass::nvrtc::kCutlassHeaderNames) + ); + + nvrtcResult compile_result = + nvrtcCompileProgram( + program, + static_cast(opts.size()), + 
opts.data()); + + size_t log_size; + NVRTC_RETURN_IF_ERROR( + nvrtcGetProgramLogSize(program, &log_size) + ); + + if (log_size > 1) { + auto log = std::make_unique(log_size); + + NVRTC_RETURN_IF_ERROR( + nvrtcGetProgramLog(program, log.get()) + ); + + std::cout << log.get() << std::endl; + } + + NVRTC_RETURN_IF_ERROR(compile_result); + + NVRTC_RETURN_IF_ERROR( + nvrtcDestroyProgram(&program) + ); + + return true; + } +}; + +/// Structure to compute the matrix product +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC +> +struct Testbed { + + /// Thread-level matrix multiply-accumulate operator + using Mma = cutlass::gemm::thread::Mma< + Shape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC + >; + + // + // Data members + // + + cutlass::HostTensor tensor_A; + cutlass::HostTensor tensor_B; + cutlass::HostTensor tensor_C; + cutlass::HostTensor tensor_D_computed; + cutlass::HostTensor tensor_D_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed() { + + tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK)); + tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN)); + tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); + tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); + } + + static inline bool check_nvrtc_error(nvrtcResult error) { + if (error != NVRTC_SUCCESS) { + std::cerr << "failed to compile "; + return false; + } + return true; + } + + /// Runs the test + bool run(std::string const &gemm_traits) { + + // 
+ // initialize device memory + // + + cutlass::reference::host::BlockFillSequential( + tensor_A.host_data(), + tensor_A.capacity() + ); + + cutlass::reference::host::BlockFillSequential( + tensor_B.host_data(), + tensor_B.capacity(), + ElementB(1), + ElementB(2) + ); + + cutlass::reference::host::TensorFill( + tensor_C.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_computed.host_view(), + ElementC(0) + ); + + cutlass::reference::host::TensorFill( + tensor_D_reference.host_view(), + ElementC(0) + ); + + tensor_A.sync_device(); + tensor_B.sync_device(); + tensor_C.sync_device(); + tensor_D_computed.sync_device(); + +#if 0 + // launch kernel + cutlass::gemm::kernel::testbed_kernel<<< dim3(1, 1), dim3(1, 1, 1) >>>( + tensor_D_computed.device_data(), + tensor_A.device_data(), + tensor_B.device_data(), + tensor_C.device_data()); + +#else + // Instantiate gemm_kernel + nvrtcResult result_nvrtc; + nvrtcProgram program; + static char const *src = + "#include \"cutlass/gemm/thread/mma.h\"\n" + "#include \"cutlass/gemm/gemm.h\"\n" + "#include \"cutlass/layout/matrix.h\"\n" + "#include \"unit/nvrtc/kernel/thread/testbed_kernel.h\"\n" + ; + + std::string type_name; +#if 0 + // TODO Ideally we'd use nvrtcGetTypeName to determine the type, but it cannot resolve enum symbol names + // As altername solution we might want to implement to_string() to get the traits string. 
+ nvrtcGetTypeName(&type_name); +#else + type_name = gemm_traits; +#endif + + result_nvrtc = nvrtcCreateProgram(&program, + src, + NULL, + (int)cutlass::nvrtc::kCutlassHeaderCount, + cutlass::nvrtc::kCutlassHeaders, + cutlass::nvrtc::kCutlassHeaderNames); + check_nvrtc_error(result_nvrtc); + + std::string gemm_kernel_instantiation = + "test::nvrtc::kernel::thread::testbed_kernel< " + type_name + " >"; + nvrtcAddNameExpression(program, gemm_kernel_instantiation.c_str()); + + const char *opts[] = {"--gpu-architecture=compute_75", + "--std=c++11", + "--include-path=/usr/local/cuda-10.1/include"}; + + result_nvrtc = nvrtcCompileProgram(program, 3, opts); + if (result_nvrtc != NVRTC_SUCCESS) { + size_t logSize; + nvrtcGetProgramLogSize(program, &logSize); + std::vector log(logSize); + nvrtcGetProgramLog(program, log.data()); + std::cout << "Compile log:" << std::endl << log.data() << std::endl; + } + if (!check_nvrtc_error(result_nvrtc)) { + assert(0); + } + + // The lowered name is the name of the template instantiation in the generated PTX code. 
+ char const *gemm_kernel_lowered_name; + nvrtcGetLoweredName(program, gemm_kernel_instantiation.c_str(), &gemm_kernel_lowered_name); + if (!check_nvrtc_error(result_nvrtc)) { + assert(0); + } + + // Query the size of the genereated PTX so that we can allocate storage and retrieve it afterwards + size_t ptx_size; + result_nvrtc = nvrtcGetPTXSize(program, &ptx_size); + if (!check_nvrtc_error(result_nvrtc)) { + assert(0); + } + + std::vector ptx(ptx_size); + result_nvrtc = nvrtcGetPTX(program, ptx.data()); + if (!check_nvrtc_error(result_nvrtc)) { + assert(0); + } + + // we do not need the nvrtc program anymore + //nvrtcDestroyProgram(&program); + + CUmodule module; + CUresult result_cuda; + result_cuda = cuModuleLoadDataEx(&module, ptx.data(), 0, 0, 0); + if (result_cuda != CUDA_SUCCESS) { + assert(0); + } + + CUfunction kernel; + result_cuda = cuModuleGetFunction(&kernel, module, gemm_kernel_lowered_name); + if (result_cuda != CUDA_SUCCESS) { + assert(0); + } + + void* d_a = (void*)tensor_A.device_data(); + void* d_b = (void*)tensor_B.device_data(); + void* d_c = (void*)tensor_C.device_data(); + void* d_d = (void*)tensor_D_computed.device_data(); + void* args[] = { &d_d, &d_a, &d_b, &d_c }; + + // CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void** kernelParams, void** extra + result_cuda = cuLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, 0 /*cudaStreamDefault*/, args, 0); + if (result_cuda != CUDA_SUCCESS) { + assert(0); + } else { +} +#endif + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + if (result != cudaSuccess) { + std::cout << "CUDA ERROR: " << cudaGetErrorString(result); + return false; + } + + tensor_D_computed.sync_host(); + + // + // Reference implementation + // + + //tensor_D_reference.fill(tensor_C.host_view()); + + cutlass::reference::host::Gemm reference_gemm; + + 
reference_gemm( + {Shape::kM, Shape::kN, Shape::kK}, + ElementC(1), + tensor_A.host_ref(), + tensor_B.host_ref(), + ElementC(0), + tensor_D_reference.host_ref() + ); + + // + // Verify equivalence + // + + // compare + bool passed = cutlass::reference::host::TensorEquals( + tensor_D_computed.host_view(), + tensor_D_reference.host_view() + ); + + if(!passed) std::cout + << "A:\n" << tensor_A.host_view() << "\n\n" + << "B:\n" << tensor_B.host_view() << "\n\n" + << "C:\n" << tensor_C.host_view() << "\n\n" + << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" + << "Computed:\n" << tensor_D_computed.host_view() << std::endl; + + std::cout << "passed " << passed << std::endl; + + return passed; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace nvrtc +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb38dc72858e32fb2b08843ac87ecb75e666136d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/CMakeLists.txt @@ -0,0 +1,36 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_pipeline + pipeline_tma_async.cu + pipeline_tma_async_warp_specialized.cu + pipeline_tma_async_warp_specialized_persistent.cu + pipeline_async.cu + sequence_barrier.cu +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_async.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_async.cu new file mode 100644 index 0000000000000000000000000000000000000000..964db2fecfca21eb8d5b002496e2cfc879ffbb97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_async.cu @@ -0,0 +1,462 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit test for the PipelineAsync class +*/ + +#define KERNEL_DBG_TRACE false + +#include "../common/cutlass_unit_test.h" +#include +#include + +#include +#include + +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/util/print_error.hpp" +#include "cutlass/util/GPU_Clock.hpp" + +#include "testbed.h" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/arch/barrier.h" +#include "cute/arch/cluster_sm90.hpp" + +using namespace cute; + +//////////////////// KERNEL ///////////////////////// + +template +struct SharedStorage +{ + typename cutlass::PipelineAsync::SharedStorage storage; +}; + +// Goal of this kernel is to complete deadlock-free +// Simple 1 producer warp, one consumer warp scenario +template +__global__ static +void pipeline_async_basic_device(uint32_t const num_iterations) +{ + + extern __shared__ char shared_memory[]; + using MainloopPipeline = typename cutlass::PipelineAsync; + using PipelineState = typename cutlass::PipelineState; + + using SharedStorage = SharedStorage; + SharedStorage& shared_storage = *reinterpret_cast(shared_memory); + + int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + int lane_predicate = cute::elect_one_sync(); + dim3 block_id_in_cluster = cute::block_id_in_cluster(); + + // This example showcases 2 producer 1 consumer example + typename MainloopPipeline::Params params; + params.producer_arv_count = 2; + params.consumer_arv_count = 1; + MainloopPipeline pipeline(shared_storage.storage, params); + + // Ensure All CTAs in Cluster have completed init before issuing commits + cute::cluster_arrive_relaxed(); + cute::cluster_wait(); + __syncthreads(); + + + if (lane_predicate) { + // Producer Warps + if (warp_idx==0 || warp_idx==1) { + + PipelineState smem_pipe_write = cutlass::make_producer_start_state(); + int prologue_iterations = min(NumStages, num_iterations); + for ( int i = 0; i < prologue_iterations; ++i) { + // Can also specify stage to commit directly + 
pipeline.producer_commit(smem_pipe_write); + ++smem_pipe_write; + } + + int mainloop_iterations = num_iterations - prologue_iterations; + + for ( ; mainloop_iterations > 0; --mainloop_iterations) { + pipeline.producer_acquire(smem_pipe_write); + pipeline.producer_commit(smem_pipe_write); + ++smem_pipe_write; + } + } + else { + PipelineState smem_pipe_read; + for (int iter=0 ; iter < num_iterations; ++iter) { + pipeline.consumer_wait(smem_pipe_read); + pipeline.consumer_release(smem_pipe_read); + ++smem_pipe_read; + } + } + } + + // To make sure remote SMEM doesn't get destroyed + cute::cluster_arrive(); + cute::cluster_wait(); +} +///////////////////////////////////////////////////// + +template +struct PipelineTest { + + // + // Data members + // + static constexpr uint32_t Stages = Stages_; + static constexpr uint32_t kBlockSize = 96; + using ClusterShape = ClusterShape_; + + // + // Methods + // + + // Ctor + PipelineTest() = default; + + + // Run CuTe GEMM kernel + cudaError_t run(uint32_t const kNumIters, + cudaStream_t stream = nullptr) { + + // Pipeline (multistage pipeline) + auto cluster_shape = Shape, Int, _1>{}; + + // + // Configure and launch + // + int iterations = 2; + cudaError_t result; + + for (int iter = 0; iter < iterations; ++iter) { + + // Define the tiled MMA layout (static, 4warps) + using MainloopPipeline = typename cutlass::PipelineAsync; + + int smem_size = int(sizeof(SharedStorage)); + + result = cudaFuncSetAttribute( + pipeline_async_basic_device, + cudaFuncAttributeMaxDynamicSharedMemorySize, + smem_size); + + // Launch a single Cluster, with 128 thread per CTA + dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimBlock(kBlockSize,1,1); + + const void* kernel = (const void*)pipeline_async_basic_device; + int iters = kNumIters; + void* kernel_params[] = {reinterpret_cast(&iters)}; + cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, 
smem_size, stream, kernel, kernel_params); + + } // profiling loop ends + + result = cudaDeviceSynchronize(); + + if (result != cudaSuccess) { + std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; + return result; + } + + return cudaSuccess; + } + +}; + +#if CUDA_12_0_SM90_FEATURES_SUPPORTED +TEST(SM90_Verify_PipelineAsync, Cluster1x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x1_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x1_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x2_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x2_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x2_Stage2) { + 
Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x2_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr 
uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster1x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster2x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage3) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 3; + using Test = PipelineTest; + Testbed testbed(options); + 
EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage4) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 4; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage6) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 6; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage8) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 8; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage9) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 9; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineAsync, 
Cluster4x4_Stage11) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 11; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async.cu new file mode 100644 index 0000000000000000000000000000000000000000..3253dfe293eb9e4ad4f3b9bb8e9b1a087544dd14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async.cu @@ -0,0 +1,463 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Unit test for the PipelineTmaAsync class +*/ + + +#define KERNEL_DBG_TRACE false + +#include "../common/cutlass_unit_test.h" +#include +#include + +#include +#include + +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/util/print_error.hpp" +#include "cutlass/util/GPU_Clock.hpp" + +#include "testbed.h" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/arch/barrier.h" +#include "cute/arch/cluster_sm90.hpp" + +using namespace cute; + +//////////////////// KERNEL ///////////////////////// + +template +struct SharedStorage +{ + typename cutlass::PipelineTmaAsync::SharedStorage storage; +}; + +// Goal of this kernel is to complete deadlock-free +template +__global__ static +void pipeline_device(uint32_t const NumIterations) +{ + + extern __shared__ char shared_memory[]; + using MainloopPipeline = cutlass::PipelineTmaAsync; + using PipelineState = cutlass::PipelineState; + + using SharedStorage = SharedStorage; + SharedStorage& shared_storage = *reinterpret_cast(shared_memory); + + [[maybe_unused]] auto cta_layout = Layout{}; // (m,n) -> cta_id + int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + int warp_group_thread_idx = threadIdx.x % 128; + dim3 block_id_in_cluster = cute::block_id_in_cluster(); + + auto cluster_shape = ClusterShape{}; + + // #Producers = 
#RowsInCluster + #ColsInCluster - 1 + uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; + uint32_t const TmaTransactionBytes = sizeof(uint32_t) * NumProducers; + uint32_t const per_cta_bytes = sizeof(uint32_t); + + // mbarrier.init + typename MainloopPipeline::Params params; + params.transaction_bytes = TmaTransactionBytes; + params.role = MainloopPipeline::ThreadCategory::ProducerConsumer; + params.is_leader = warp_group_thread_idx == 0; + params.num_consumers = 128; + + MainloopPipeline pipeline(shared_storage.storage, params); + + __syncthreads(); + + // Ensure All CTAs in Cluster have completed init before issuing commits + cute::cluster_arrive_relaxed(); + cute::cluster_wait(); + + // Total number of gemm_k_iterations + auto mma_k_iterations = NumIterations; + auto tma_k_iterations = NumIterations; + + PipelineState smem_pipe_read; + // For the DMA (prologue) - we start with an opposite phase - since we skip all waits + // i.e., we know that the buffer is indeed empty + PipelineState smem_pipe_write = cutlass::make_producer_start_state(); + PipelineState smem_pipe_release; + int K_TILE_MMAS = 1; + + int lane_predicate = cute::elect_one_sync(); + int k_pipe_tma_prologue = min(NumStages, tma_k_iterations); + + // DMA Prologue (Loads) + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < k_pipe_tma_prologue; ++i) { + pipeline.producer_acquire(smem_pipe_write); + // cp.async.bulk.tensor would typically happen here + pipeline.producer_commit(smem_pipe_write, per_cta_bytes); + ++smem_pipe_write; + } + tma_k_iterations -= k_pipe_tma_prologue; + + // MMA Prologue (Compute) - modeling inflight MMAs + for (int iter = 0; iter < K_TILE_MMAS; ++iter) + { + pipeline.consumer_wait(smem_pipe_read); + warpgroup_arrive(); + // GMMA would typically happen here + + ++smem_pipe_read; + } + + mma_k_iterations -= K_TILE_MMAS; + + CUTLASS_PRAGMA_NO_UNROLL + for (int iter = 0; iter < mma_k_iterations; ++iter) + { + 
pipeline.consumer_wait(smem_pipe_read); + + warpgroup_arrive(); + // GMMA would typically happen here + + pipeline.consumer_release(smem_pipe_release); + + if (lane_predicate && (warp_idx == 0) && (tma_k_iterations > 0)) { + pipeline.producer_acquire(smem_pipe_write); + // cp.async.bulk.tensor would typically happen here + pipeline.producer_commit(smem_pipe_write, per_cta_bytes); + ++smem_pipe_write; + --tma_k_iterations; + } + + // next read stage + ++smem_pipe_read; + ++smem_pipe_release; + } + + // To make sure remote SMEM doesn't get destoryed + cute::cluster_arrive(); + cute::cluster_wait(); +} +///////////////////////////////////////////////////// + +/// Device NT GMMA + TMA specialized +template +struct PipelineTest { + + // + // Data members + // + static constexpr uint32_t Stages = Stages_; + static constexpr uint32_t kBlockSize = 128; + using ClusterShape = ClusterShape_; + + // + // Methods + // + + // Ctor + PipelineTest(){}; + + + // Run CuTe GEMM kernel + cudaError_t run(uint32_t const kNumIters, + cudaStream_t stream = 0) { + + float elapsed_ms = 0.0f; + // Pipeline (multistage pipeline) + [[maybe_unused]] auto num_stages = Int{}; + + auto cluster_shape = Shape, Int, _1>{}; + + // + // Configure and launch + // + int iterations = 1; + cudaEvent_t events[2]; + cudaError_t result; + + for (cudaEvent_t & event : events) { + result = cudaEventCreate(&event); + if (result != cudaSuccess) { + std::cerr << "Error: Failed to create event."; + return result; + } + } + + result = cudaEventRecord(events[0]); + + if (result != cudaSuccess) { + std::cerr << "Error: Failed to record start event."; + return result; + } + + for (int iter = 0; iter < iterations; ++iter) { + int smem_size = int(sizeof(SharedStorage)); + + result = cudaFuncSetAttribute( + pipeline_device, + cudaFuncAttributeMaxDynamicSharedMemorySize, + smem_size); + + // Launch a single Cluster, with 128 thread per CTA + dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 
dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimBlock(kBlockSize,1,1); + + const void* kernel = (const void*)pipeline_device; + int iters = kNumIters; + void* kernel_params[] = {reinterpret_cast(&iters)}; + cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); + + } // profiling loop ends + + result = cudaEventRecord(events[1]); + + if (result != cudaSuccess) { + std::cerr << "Error: Failed to record stop event."; + return result; + } + + result = cudaDeviceSynchronize(); + + if (result != cudaSuccess) { + std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; + return result; + } + + result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); + + if (result != cudaSuccess) { + std::cerr << "Failed to create event."; + return result; + } + + for (cudaEvent_t & event : events) { + (void)cudaEventDestroy(event); + } + + return cudaSuccess; + } +}; + +#if CUDA_12_0_SM90_FEATURES_SUPPORTED +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 2; + 
using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + 
EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + 
+TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized.cu new file mode 100644 index 0000000000000000000000000000000000000000..c6fa463a375f828e3976d5e95118f4b3bd2bf10d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized.cu @@ -0,0 +1,525 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit test for the PipelineTmaAsync class as it would be used in a Warp specialized loop +*/ + +#define KERNEL_DBG_TRACE false + +#include "../common/cutlass_unit_test.h" +#include +#include + +#include +#include + +#include +#include + +#include "cutlass/core_io.h" +#include "cutlass/util/print_error.hpp" +#include "cutlass/util/GPU_Clock.hpp" + +#include "testbed.h" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/arch/barrier.h" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/barrier.h" +#include "cutlass/arch/reg_reconfig.h" + + +using namespace cute; +using namespace cutlass; + +//////////////////// KERNEL ///////////////////////// + +template +struct SharedStorage +{ + typename cutlass::PipelineTmaAsync::SharedStorage storage ; +}; + +struct KernelParams +{ + uint32_t num_iterations; + int* data_ptr; +}; + +// Goal of this kernel is to complete deadlock-free +template +__launch_bounds__(384, 1) +__global__ static +void pipeline_device(KernelParams const kernel_params) +{ + extern __shared__ char shared_memory[]; + using MainloopPipeline = typename cutlass::PipelineTmaAsync; + using PipelineState = typename cutlass::PipelineState; + + using SharedStorage = SharedStorage; + SharedStorage& shared_storage = *reinterpret_cast(shared_memory); + + [[maybe_unused]] auto cta_layout = Layout{}; // (m,n) -> cta_id + int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / 128, 0); + int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0); + int warp_group_thread_idx = threadIdx.x % 128; + dim3 block_id_in_cluster = cute::block_id_in_cluster(); + + auto cluster_shape = ClusterShape{}; + + // #Producers = #RowsInCluster + #ColsInCluster - 1 + uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; + uint32_t const TmaTransactionBytes = static_cast(sizeof(uint32_t) * NumProducers); + uint32_t const per_cta_bytes = sizeof(uint32_t); + + // mbarrier.init + typename 
MainloopPipeline::Params params; + params.transaction_bytes = TmaTransactionBytes; + if (warp_group_idx == 0) { + params.role = MainloopPipeline::ThreadCategory::Producer; + } + else { + params.role = MainloopPipeline::ThreadCategory::Consumer; + } + params.is_leader = warp_group_thread_idx == 0; + params.num_consumers = 128; + + MainloopPipeline pipeline(shared_storage.storage, params); + + __syncthreads(); + + // Ensure All CTAs in Cluster have completed init before issuing commits + cute::cluster_arrive_relaxed(); + cute::cluster_wait(); + + + // Producer WarpGroup + if (warp_group_idx == 0) { + cutlass::arch::warpgroup_reg_alloc<232>(); + + int lane_predicate = cute::elect_one_sync(); + if (warp_idx_in_warpgroup == 0 && lane_predicate) { + + int tma_k_prologue = min(Stages, kernel_params.num_iterations); + + // Simulating Prologue TMA Loads + // For the DMA (prologue) - we start with an opposite phase - since we skip all waits + // i.e., we know that the buffer is indeed empty + PipelineState smem_pipe_write = make_producer_start_state(); + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < tma_k_prologue; ++i) { + pipeline.producer_acquire(smem_pipe_write); + // Simulating cp.async.bulk.tensor behavior + pipeline.producer_commit(smem_pipe_write, per_cta_bytes); + ++smem_pipe_write; + } + int tma_k_iter = kernel_params.num_iterations - tma_k_prologue; + + // Simulating Mainloop TMA Loads + CUTE_NO_UNROLL + for ( ; tma_k_iter > 0; --tma_k_iter) { + + pipeline.producer_acquire(smem_pipe_write); + + // Simulating cp.async.bulk.tensor behavior + pipeline.producer_commit(smem_pipe_write, per_cta_bytes); + + // Advance write stage + ++smem_pipe_write; + } + + // Tail Loop + // Handles the case where we never enter the mainloop + PipelineState tail = tma_k_prologue == Stages ? 
smem_pipe_write : PipelineState{}; + for ( int i = 0; i < tma_k_prologue; ++i) { + pipeline.producer_acquire(tail); + ++tail; + } + } + // Consumer WarpGroup + } else if(warp_group_idx == 1) { + cutlass::arch::warpgroup_reg_alloc<232>(); + + PipelineState smem_pipe_read; + PipelineState smem_pipe_release; + + // simulates accumulators + extra reg. pressure + int arr[168]; + + // Init Shared Memory read stages & PhaseBit + static constexpr uint32_t K_PIPE_MMAS = 1; + static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight"); + + // Total number of gemm iterations + auto gemm_k_iterations = kernel_params.num_iterations; + + // Simulating Prologue MMAs + int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations); + CUTLASS_PRAGMA_UNROLL + for (int iter = 0; iter < mma_k_prologue; ++iter) { + pipeline.consumer_wait(smem_pipe_read); + + warpgroup_arrive(); + // GMMA would typically happen here + + ++smem_pipe_read; + } + gemm_k_iterations -= mma_k_prologue; + + // Simulating Mainloop MMAs + CUTLASS_PRAGMA_NO_UNROLL + for ( ; gemm_k_iterations > 0; --gemm_k_iterations) { + + /// Wait on the smem_pipe_read stage / phase + pipeline.consumer_wait(smem_pipe_read); + + warpgroup_arrive(); + // GMMA would typically happen here + + // Dummy op - which will never happen + // But simulates high register usage. 
+ CUTE_UNROLL + for(int i = 0; i < 168; ++i){ + if (threadIdx.x > 256){ + arr[i] += kernel_params.data_ptr[i]; + } + } + + pipeline.consumer_release(smem_pipe_release); + + // Advance stages + ++smem_pipe_read; + ++smem_pipe_release; + } + + // Dummy op - which will never happen + CUTE_UNROLL + for(int i = 0; i < 168; ++i){ + if (threadIdx.x > 256){ + kernel_params.data_ptr[i] = arr[i]; + } + } + + // Tail Loop + for (int i = 0; i < K_PIPE_MMAS; ++i){ + pipeline.consumer_release(smem_pipe_release); + ++smem_pipe_release; + } + + // Warp-Group #2 + } else { + cutlass::arch::warpgroup_reg_dealloc<40>(); + } +} +///////////////////////////////////////////////////// + +/// Device NT GMMA + TMA specialized +template +struct PipelineTest { + + // + // Data members + // + static constexpr uint32_t Stages = Stages_; + static constexpr uint32_t kBlockSize = 128 * 3; + using ClusterShape = ClusterShape_; + + // + // Methods + // + + // Ctor + PipelineTest(){}; + + // Run CuTe GEMM kernel + cudaError_t run(uint32_t const kNumIters, + cudaStream_t stream = 0) { + + float elapsed_ms = 0.0f; + // Pipeline (multistage pipeline) + [[maybe_unused]] auto num_stages = Int{}; + auto cluster_shape = Shape, Int, _1>{}; + + // + // Configure and launch + // + int iterations = 1; + cudaEvent_t events[2]; + cudaError_t result; + + for (cudaEvent_t & event : events) { + result = cudaEventCreate(&event); + if (result != cudaSuccess) { + std::cerr << "Error: Failed to create event."; + return result; + } + } + + result = cudaEventRecord(events[0]); + + if (result != cudaSuccess) { + std::cerr << "Error: Failed to record start event."; + return result; + } + + for (int iter = 0; iter < iterations; ++iter) { + + using MainloopPipeline = typename cutlass::PipelineTmaAsync; + + int smem_size = int(sizeof(SharedStorage)); + + result = cudaFuncSetAttribute( + pipeline_device, + cudaFuncAttributeMaxDynamicSharedMemorySize, + smem_size); + + // Launch a single Cluster, with kBlockSize threads per CTA 
+ dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimBlock(kBlockSize,1,1); + + const void* kernel = (const void*)pipeline_device; + KernelParams params{kNumIters, nullptr}; + void* kernel_params[] = {reinterpret_cast(¶ms)}; + cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); + + } + + result = cudaEventRecord(events[1]); + + if (result != cudaSuccess) { + std::cerr << "Error: Failed to record stop event."; + return result; + } + + result = cudaDeviceSynchronize(); + + if (result != cudaSuccess) { + std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; + return result; + } + + result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); + + if (result != cudaSuccess) { + std::cerr << "Failed to create event."; + return result; + } + + for (cudaEvent_t & event : events) { + (void)cudaEventDestroy(event); + } + + return cudaSuccess; + } +}; + +#if CUDA_12_0_SM90_FEATURES_SUPPORTED +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage2) { + Options options; + using ClusterShape = 
cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr 
uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + 
Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized_persistent.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized_persistent.cu new file mode 100644 index 0000000000000000000000000000000000000000..efb389be89327847675987e98e947d52bd1d94b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized_persistent.cu @@ -0,0 +1,578 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit test for the PipelineTmaAsync class used in a WarpSpecialized Persistent loop +*/ + +#define KERNEL_DBG_TRACE false + +#include "../common/cutlass_unit_test.h" +#include +#include + +#include +#include + +#include +#include + +#include "cutlass/core_io.h" +#include "cutlass/util/print_error.hpp" +#include "cutlass/util/GPU_Clock.hpp" + +#include "testbed.h" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/arch/barrier.h" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/barrier.h" +#include "cutlass/arch/reg_reconfig.h" + + +using namespace cute; +using namespace cutlass; + +//////////////////// KERNEL ///////////////////////// + +template +struct SharedStorage +{ + typename cutlass::PipelineTmaAsync::SharedStorage pipeline_storage; + typename PingPongBarrier::SharedStorage pingpong_storage; +}; + +template +struct CollectiveSimulation { + using MainloopPipeline = typename cutlass::PipelineTmaAsync; + using PipelineState = typename cutlass::PipelineState; + + CUTLASS_DEVICE + static void + dma_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe, + uint32_t const num_iterations) { + uint32_t const per_cta_bytes = sizeof(uint32_t); + int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0); + int lane_predicate = cute::elect_one_sync(); + if (warp_idx_in_warpgroup==0 && lane_predicate) { + + int tma_k_prologue = min(Stages, num_iterations); + + // Simulating Prologue TMA Loads + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < tma_k_prologue; ++i) { + pipeline.producer_acquire(tile_start_state_pipe); + // Simulating cp.async.bulk.tensor behavior + pipeline.producer_commit(tile_start_state_pipe, per_cta_bytes); + ++tile_start_state_pipe; + } + int tma_k_iter = num_iterations - tma_k_prologue; + + PipelineState wr_pipe = tile_start_state_pipe; + // Simulating Mainloop TMA Loads + CUTE_NO_UNROLL + for ( ; tma_k_iter > 0; --tma_k_iter){ + + pipeline.producer_acquire(wr_pipe); + + // 
Simulating cp.async.bulk.tensor behavior + pipeline.producer_commit(wr_pipe, per_cta_bytes); + + // Advance write stage + ++wr_pipe; + } + } + } + + CUTLASS_DEVICE + static void + math_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe, + uint32_t const num_iterations, int* data_ptr) { + PipelineState rd_pipe = tile_start_state_pipe; + PipelineState release_pipe = rd_pipe; + + // simulates accumulators + extra reg. pressure + int arr[168]; + + // Init Shared Memory read stages & PhaseBit + static constexpr uint32_t K_PIPE_MMAS = 1; + static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight"); + + // Total number of gemm iterations + auto gemm_k_iterations = num_iterations; + + // Simulating Prologue MMAs + int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations); + CUTLASS_PRAGMA_UNROLL + for (int iter = 0; iter < mma_k_prologue; ++iter) { + pipeline.consumer_wait(rd_pipe); + + warpgroup_arrive(); + // GMMA would typically happen here + + ++rd_pipe; + } + gemm_k_iterations -= mma_k_prologue; + + // Simulating Mainloop MMAs + CUTLASS_PRAGMA_NO_UNROLL + for ( ; gemm_k_iterations > 0; --gemm_k_iterations) { + + /// Wait on the rd_pipe stage / phase + pipeline.consumer_wait(rd_pipe); + + warpgroup_arrive(); + // GMMA would typically happen here + + // Dummy op - which will never happen + // But simulates high register usage. 
+ CUTE_UNROLL + for(int i = 0; i < 168; ++i){ + if (threadIdx.x > 384){ + arr[i] += data_ptr[i]; + } + } + + pipeline.consumer_release(release_pipe); + + // Advance stages + ++rd_pipe; + ++release_pipe; + } + + // Dummy op - which will never happen + CUTE_UNROLL + for(int i = 0; i < 168; ++i){ + if (threadIdx.x > 384){ + data_ptr[i] = arr[i]; + } + } + + // Tail Loop + for (int i = 0; i < K_PIPE_MMAS; ++i){ + pipeline.consumer_release(release_pipe); + ++release_pipe; + } + + } +}; + +struct KernelParams +{ + uint32_t num_iterations; + int tiles_per_cluster; + int* data_ptr; +}; + +// Goal of this kernel is to complete deadlock-free +template +__launch_bounds__(384, 1) +__global__ static +void pipeline_device(KernelParams params) +{ + extern __shared__ char shared_memory[]; + using MainloopPipeline = typename cutlass::PipelineTmaAsync; + using PipelineState = typename cutlass::PipelineState; + + /* One for Mainloop and one for Epilogue */ + constexpr int StagesPerMathWarpGroup = 2; + constexpr int MathWarpGroupCountPersistent = 2; + using PingPongBarrier = typename cutlass::OrderedSequenceBarrier; + + using SharedStorage = SharedStorage; + SharedStorage& shared_storage = *reinterpret_cast(shared_memory); + + [[maybe_unused]] auto cta_layout = Layout{}; // (m,n) -> cta_id + int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0); + int warp_group_thread_idx = threadIdx.x % NumThreadsPerWarpGroup; + dim3 block_id_in_cluster = cute::block_id_in_cluster(); + + auto cluster_shape = ClusterShape{}; + + // #Producers = #RowsInCluster + #ColsInCluster - 1 + uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; + uint32_t const TmaTransactionBytes = static_cast(sizeof(uint32_t) * NumProducers); + + // mbarrier.init + typename MainloopPipeline::Params pipeline_params; + pipeline_params.transaction_bytes = TmaTransactionBytes; + if (warp_group_idx == 0) { + pipeline_params.role = 
MainloopPipeline::ThreadCategory::Producer; + } + else { + pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + pipeline_params.is_leader = warp_group_thread_idx == 0; + pipeline_params.num_consumers = NumThreadsPerWarpGroup; + + MainloopPipeline pipeline(shared_storage.pipeline_storage, pipeline_params); + PipelineState tile_start_state_pipe; + + int tiles_per_cluster = params.tiles_per_cluster; + + /* Offset pipeline start state for Math WG 2 */ + if (warp_group_idx == 2) { + // Update pipeline state for next persistent tile + tile_start_state_pipe.advance(params.num_iterations); + tiles_per_cluster--; + } + + typename PingPongBarrier::Params pingpong_params; + pingpong_params.group_id = warp_group_idx - 1; // Since DMA Warp Group Idx 0 will not participate + pingpong_params.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group + PingPongBarrier math_wg_barrier(shared_storage.pingpong_storage, pingpong_params); + + __syncthreads(); + + // Ensure All CTAs in Cluster have completed init before issuing commits + cute::cluster_arrive_relaxed(); + cute::cluster_wait(); + + // Producer/DMA WarpGroup + if (warp_group_idx == 0) { + cutlass::arch::warpgroup_reg_dealloc<40>(); + // For the DMA (prologue) - we start with an opposite phase - since we skip all waits + // i.e., we know that the buffer is indeed empty + PipelineState tile_prologue_state_pipe = make_producer_start_state(); + while (tiles_per_cluster > 0) { + CollectiveSimulation::dma_wg_simulation(pipeline, tile_prologue_state_pipe, params.num_iterations); + // Update pipeline state for next persistent tile + tile_prologue_state_pipe.advance(params.num_iterations); + tiles_per_cluster--; + } + } + // Math WarpGropups + if(warp_group_idx == 1 || warp_group_idx == 2) { + cutlass::arch::warpgroup_reg_alloc<232>(); + while (tiles_per_cluster > 0) { + // MMA + math_wg_barrier.wait(); + CollectiveSimulation::math_wg_simulation(pipeline, tile_start_state_pipe, 
params.num_iterations, params.data_ptr); + math_wg_barrier.arrive(); + // Epilogue + math_wg_barrier.wait(); + // Simulates long running stage + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) + __nanosleep(100000); + #endif + math_wg_barrier.arrive(); + // Update pipeline state for next persistent tile + tile_start_state_pipe.advance(params.num_iterations * 2); + tiles_per_cluster -= 2; + } + } + + // Makes sure remote SMEM doesn't get destroyed + cute::cluster_arrive_relaxed(); + cute::cluster_wait(); +} +///////////////////////////////////////////////////// + +/// Device NT GMMA + TMA specialized +template +struct PipelineTest { + + // + // Data members + // + static constexpr uint32_t Stages = Stages_; + static constexpr uint32_t kBlockSize = 128 * 3; + using ClusterShape = ClusterShape_; + + // + // Methods + // + + // Run CuTe GEMM kernel + cudaError_t run(uint32_t const kNumIters, + cudaStream_t stream = 0) { + + float elapsed_ms = 0.0f; + // Pipeline (multistage pipeline) + auto cluster_shape = Shape, Int, _1>{}; + + // + // Configure and launch + // + int iterations = 1; + cudaEvent_t events[2]; + cudaError_t result; + + for (cudaEvent_t & event : events) { + result = cudaEventCreate(&event); + if (result != cudaSuccess) { + std::cerr << "Error: Failed to create event."; + return result; + } + } + + result = cudaEventRecord(events[0]); + + if (result != cudaSuccess) { + std::cerr << "Error: Failed to record start event."; + return result; + } + + for (int iter = 0; iter < iterations; ++iter) { + constexpr int StagesPerMathWarpGroup = 2; + constexpr int MathWarpGroupCountPersistent = 2; + int smem_size = int(sizeof(SharedStorage>)); + + result = cudaFuncSetAttribute( + pipeline_device, + cudaFuncAttributeMaxDynamicSharedMemorySize, + smem_size); + + // Launch a single Cluster, with kBlockSize threads per CTA + dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 
dimBlock(kBlockSize,1,1); + + int tiles_per_cluster = (kNumIters % 10) + 1; + printf("Persistent version: Tiles per Cluster = %d\n", tiles_per_cluster); + + const void* kernel = (const void*)pipeline_device; + KernelParams params{kNumIters, tiles_per_cluster, nullptr}; + void *kernel_params[] = {¶ms}; + cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); + + } + + result = cudaEventRecord(events[1]); + + if (result != cudaSuccess) { + std::cerr << "Error: Failed to record stop event."; + return result; + } + + result = cudaDeviceSynchronize(); + + if (result != cudaSuccess) { + std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; + return result; + } + + result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); + + if (result != cudaSuccess) { + std::cerr << "Failed to create event."; + return result; + } + + for (cudaEvent_t & event : events) { + (void)cudaEventDestroy(event); + } + + return cudaSuccess; + } +}; + +#if CUDA_12_0_SM90_FEATURES_SUPPORTED +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage10) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr uint32_t Stages = 10; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage2) { + Options 
options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage5) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 5; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, 
Cluster1x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + 
+TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage2) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage7) { + Options options; + using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; + static constexpr uint32_t Stages = 7; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/sequence_barrier.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/sequence_barrier.cu new file mode 100644 index 0000000000000000000000000000000000000000..45b3ee9be7f7cc5e1c755b52bdf9487f70227358 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/sequence_barrier.cu @@ -0,0 +1,226 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Unit test for the OrderedSequenceBarrier class +*/ + +#include "../common/cutlass_unit_test.h" +#include +#include + +#include +#include + +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/util/print_error.hpp" +#include "cutlass/util/GPU_Clock.hpp" + +#include "testbed.h" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/arch/barrier.h" +#include "cute/arch/cluster_sm90.hpp" + +using namespace cute; + +//////////////////// KERNEL ///////////////////////// + +template +struct SharedStorage +{ + typename OrderedSequencer::SharedStorage storage; +}; + +// Goal of this kernel is to complete deadlock-free +template +__global__ static +void ordered_sequence_device(uint32_t const num_iterations) +{ + + extern __shared__ char shared_memory[]; + using SequenceBarrier = typename cutlass::OrderedSequenceBarrier; + using SmemStorage = SharedStorage; + + SmemStorage& shared_storage = *reinterpret_cast(shared_memory); + + int group_idx = threadIdx.x / ThreadsPerGroup; + + typename SequenceBarrier::Params params; + params.group_id = group_idx; // sequence ID + params.group_size = ThreadsPerGroup; // Number of threads / participants in a group + + SequenceBarrier barrier(shared_storage.storage, params); + + // Ensure All CTAs in Cluster have completed init before issuing commits + __syncthreads(); + cute::cluster_arrive_relaxed(); + cute::cluster_wait(); + + CUTLASS_PRAGMA_NO_UNROLL + for (int i = 0; i < num_iterations; ++i){ + + barrier.wait(); + // STAGE 1 CODE... + #ifndef NDEBUG + int thread_idx_in_group = threadIdx.x % ThreadsPerGroup; + if (thread_idx_in_group == 0) { + printf("STAGE 0 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x); + } + #endif + // Simulates long running stage + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) + __nanosleep(100000); + #endif + barrier.arrive(); + + barrier.wait(); + // STAGE 2 CODE... 
+ #ifndef NDEBUG + if (thread_idx_in_group == 0) { + printf("STAGE 1 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x); + } + #endif + // Simulates long running stage + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) + __nanosleep(100000); + #endif + barrier.arrive(); + } + + // To make sure remote SMEM doesn't get destroyed + cute::cluster_arrive(); + cute::cluster_wait(); +} +///////////////////////////////////////////////////// + +template +struct PipelineTest { + + // + // Data members + // + static constexpr uint32_t ThreadsPerGroup = 128; + static constexpr uint32_t BlockSize = GroupCount_ * ThreadsPerGroup; + static constexpr uint32_t Stages = Stages_; + static constexpr uint32_t GroupCount = GroupCount_; + using SequenceBarrier = typename cutlass::OrderedSequenceBarrier; + using SmemStorage = SharedStorage; + + // + // Methods + // + + // Run CuTe GEMM kernel + cudaError_t run(uint32_t const kNumIters, + cudaStream_t stream = nullptr) { + + // Pipeline (multistage pipeline) + auto cluster_shape = Shape<_1, _1, _1>{}; + + // + // Configure and launch + // + int iterations = 1; + cudaError_t result; + + for (int iter = 0; iter < iterations; ++iter) { + + int smem_size = int(sizeof(SmemStorage)); + + result = cudaFuncSetAttribute( + ordered_sequence_device, + cudaFuncAttributeMaxDynamicSharedMemorySize, + smem_size); + + // Launch a single Cluster, with 128 thread per CTA + dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), size<2>(cluster_shape)); + dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); + dim3 dimBlock(BlockSize,1,1); + + const void* kernel = (const void*)ordered_sequence_device; + int iters = kNumIters; + void* kernel_params[] = {reinterpret_cast(&iters)}; + cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); + + } // profiling loop ends + + result = cudaDeviceSynchronize(); + + if (result != cudaSuccess) { + 
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; + return result; + } + + return cudaSuccess; + } +}; + +#if CUDA_12_0_SM90_FEATURES_SUPPORTED +TEST(SM90_Verify_OrderedSequence, Depth_2_Length_2) { + Options options; + static constexpr uint32_t GroupCount = 2; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_OrderedSequence, Depth_2_Length_3) { + Options options; + static constexpr uint32_t GroupCount = 3; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_OrderedSequence, Depth_2_Length_4) { + Options options; + static constexpr uint32_t GroupCount = 4; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} + +TEST(SM90_Verify_OrderedSequence, Depth_2_Length_5) { + Options options; + static constexpr uint32_t GroupCount = 5; + static constexpr uint32_t Stages = 2; + using Test = PipelineTest; + Testbed testbed(options); + EXPECT_TRUE(testbed.verification()); +} +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..b809e74324c5cf429ec950e23a3dc1509a3b5499 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/pipeline/testbed.h @@ -0,0 +1,145 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Common Testbed file shared by Pipeline unit tests +*/ + +#include +#include +#include +#include + +#include "cutlass/util/command_line.h" +#include "../common/cutlass_unit_test.h" + +#if CUDA_12_0_SM90_FEATURES_SUPPORTED + #define CUTLASS_UNIT_TEST_PIPELINE true +#else + #define CUTLASS_UNIT_TEST_PIPELINE false +#endif + +// Command line test options +struct Options { + // + // Data Members + // + bool help; + bool verification_enabled; + int SM_count; + int clock_MHz; + + // + // Methods + // + Options(): + help(false), + verification_enabled(true), + SM_count(116), + clock_MHz(1477) + { } + + void parse(int argc, char const **args) { + cutlass::CommandLine cmd(argc, args); + + if (cmd.check_cmd_line_flag("help")) { + help = true; + } + + cmd.get_cmd_line_argument("verification-enabled", verification_enabled, true); + cmd.get_cmd_line_argument("sm-count", SM_count, 116); + cmd.get_cmd_line_argument("clock", clock_MHz, 1477); + } + + /// Prints the usage statement. + std::ostream & print_usage(std::ostream &out) const { + + out << "Options:\n\n" + << " --help If specified, displays this usage statement.\n\n" + << " --verification-enabled= Enable/Disable verification\n" + << " --sm-count= Number of SMs on the chip\n" + << " --clock= Locked clock value in Mhz\n"; + + return out; + } +}; + +// +// Testbed +// + +template +struct Testbed { +private: + // Commandline options + Options options; + + void run_test(uint32_t const kNumIters) { + + // Run CuTe Gemm + Pipeline pipeline; + + cudaError_t result = pipeline.run(kNumIters); + + CUTE_CHECK_LAST(); + } + + +public: + Testbed(Options const &options_) : options(options_) { + int device_id = 0; + cudaDeviceProp device_prop; + CUTE_CHECK_ERROR(cudaSetDevice(device_id)); + CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id)); + + if (device_prop.major < 1) { + fprintf(stderr, "Device does not support CUDA.\n"); + exit(1); + } + } + + /// Run verification Gemm problem sizes + bool 
verification() { + + std::array kNumIters; + + for (int i = 0; i < kNumIters.size(); ++i) { + kNumIters[i] = (rand() % 1000) + 1; + } + + for (int n : kNumIters) { + std::cout << "Stages = " << Pipeline::Stages << " kNumIters = " << n << "\n"; + run_test(n); + } + + return true; + } +}; diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..910d2a9d899bab95b3270b4ffc72cc77726e5d49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/CMakeLists.txt @@ -0,0 +1,47 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +add_subdirectory(thread) +add_subdirectory(kernel) +add_subdirectory(device) + +add_custom_target( + cutlass_test_unit_reduction + DEPENDS + cutlass_test_unit_reduction_thread + cutlass_test_unit_reduction_kernel + cutlass_test_unit_reduction_device + ) + +add_custom_target( + test_unit_reduction + DEPENDS + test_unit_reduction_thread + test_unit_reduction_kernel + test_unit_reduction_device + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..48372df016e713d018d4c0e7f02f83d679e18f5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/CMakeLists.txt @@ -0,0 +1,34 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +cutlass_test_unit_add_executable( + cutlass_test_unit_reduction_device + tensor_reduce_strided.cu + tensor_reduce_contiguous.cu +) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/tensor_reduce_contiguous.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/tensor_reduce_contiguous.cu new file mode 100644 index 0000000000000000000000000000000000000000..c582eb5b07014efeaf4ccfe89fc787cd1b38ca1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/tensor_reduce_contiguous.cu @@ -0,0 +1,476 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Tests for TensorReduce family of device-wide operators +*/ + +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/reduction/thread/reduction_operators.h" +#include "cutlass/reduction/device/tensor_reduce.h" + +#include "cutlass/functional.h" +#include "cutlass/layout/tensor.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/device/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/tensor_view_io.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1. 
+template +bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) { + + using Layout = typename TensorReduction::Layout; + using ElementOutput = typename TensorReduction::ElementOutput; + using ElementSource = typename TensorReduction::ElementSource; + + int const kV = TensorReduction::kVectorLength; + + int const N_indices[] = {3, 13}; + int const H_indices[] = {5, 17}; + int const W_indices[] = {7, 19}; + int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1}; + + for (int N : N_indices) { + for (int H : H_indices) { + for (int W : W_indices) { + for (int Cx : C_indices) { + + int C = Cx * kV; + + cutlass::HostTensor src_tensor({N, H, W, C}); + cutlass::HostTensor dst_tensor({N, H, W, 1}); + + cutlass::reference::host::TensorFillRandomUniform( + src_tensor.host_view(), 17, 10, -10, 0); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + // Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW) + TensorReduction reduction(src_tensor.extent(), 3); + + cutlass::DeviceAllocation device_workspace(reduction.workspace_size()); + + cutlass::Status status = reduction.reduce( + dst_tensor.device_ref(), + src_tensor.device_ref(), + device_workspace.get(), + reduction_identity + ); + + EXPECT_EQ(status, cutlass::Status::kSuccess); + EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess); + + dst_tensor.sync_host(); + + typename TensorReduction::ReductionOp reduction_op; + + // + // Reference check + // + for (int n = 0; n < src_tensor.extent().n(); ++n) { + for (int h = 0; h < src_tensor.extent().h(); ++h) { + for (int w = 0; w < src_tensor.extent().w(); ++w) { + + ElementCompute c_accum = reduction_identity; + + for (int c = 0; c < src_tensor.extent().c(); ++c) { + c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c}))); + } + + ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0})); + + bool equal = (c_accum == got); + + EXPECT_TRUE(equal); + 
if (!equal) { + + std::cerr + << "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl; + + std::cerr + << " expected: " << c_accum << std::endl + << " got: " << got << std::endl; + + std::cerr + << "Problem: " << src_tensor.extent() << " -> " + << dst_tensor.extent() << std::endl; + + std::cerr + << " Grid: " << reduction.reduction_strided.grid_shape + << "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl + << " FInal: " << reduction.reduction_strided.grid_final + << "\n Block: " << reduction.reduction_strided.threadblock_final << "\n"; + + return false; + } + + } //w + } // h + } // n + + // + // Next problem + // + + } // C + } // W + } // H + } // N + + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 1; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = cutlass::half_t; + using ElementCompute = float; + int const kV = 1; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + 
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 2; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = cutlass::half_t; + using ElementCompute = float; + int const kV = 2; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 4; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = cutlass::half_t; + using ElementCompute = float; + int const kV = 4; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 4; + + // Define the functor + using Functor = cutlass::maximum; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c( -std::numeric_limits::max() )); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 4; + + // Define the functor + using Functor = cutlass::minimum; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c( std::numeric_limits::max() )); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = int; + using ElementSource = int; + using ElementCompute = int; + int const kV = 1; + + // Define the functor + using Functor = cutlass::logical_or; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c( ElementCompute(0) )); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = int; + using ElementSource = int; + using ElementCompute = int; + int const kV = 1; + + // Define the functor + using Functor = cutlass::logical_and; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c( ElementCompute(1) )); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 1; + + // Define the functor + using Functor = cutlass::logical_or; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c( ElementCompute(0) )); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHW +TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) { + + using Layout = cutlass::layout::TensorNHWC; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + int const kV = 1; + + // Define the functor + using Functor = cutlass::logical_and; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_c( ElementCompute(1) )); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/tensor_reduce_strided.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/tensor_reduce_strided.cu new file mode 100644 index 0000000000000000000000000000000000000000..7e9ccc30f7f1b18d8b4372e4ba1432699e4d37c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/device/tensor_reduce_strided.cu @@ -0,0 +1,523 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for TensorReduce family of device-wide operators +*/ + +#include +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/reduction/thread/reduction_operators.h" +#include "cutlass/reduction/device/tensor_reduce.h" + +#include "cutlass/functional.h" +#include "cutlass/layout/tensor.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/device/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/tensor_view_io.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This reduces the W dimension, transforming an NHWC tensor into NHWC with W=1. +template < + typename TensorReduction, + typename ElementCompute = typename TensorReduction::ElementCompute +> +bool TestAllReduction_NHWC_reduce_w(ElementCompute reduction_identity = ElementCompute()) { + + using Layout = typename TensorReduction::Layout; + using ElementOutput = typename TensorReduction::ElementOutput; + using ElementSource = typename TensorReduction::ElementSource; + + int const kV = TensorReduction::kVectorLength; + + int const N_indices[] = {1, 2, 5, 10}; + int const H_indices[] = {1, 3, 9 }; + int const W_indices[] = {1, 5, 19, 40, 224}; + int const C_indices[] = { + kV, + 2 * kV, + 5 * kV, + 9 * kV, + 17 * kV, + 39 * kV, + 257 * kV, + kV * 760 + }; + + using Element = int; + + for (int N : N_indices) { + for (int H : H_indices) { + for (int W : W_indices) { + for (int C : C_indices) { + + cutlass::HostTensor src_tensor({N, H, W, C}); + cutlass::HostTensor dst_tensor({N, H, 1, C}); + + cutlass::reference::host::TensorFillRandomUniform( + src_tensor.host_view(), 17, 10, -10, 0); + + 
cutlass::reference::host::BlockFillSequential( + dst_tensor.host_data(), dst_tensor.capacity()); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + // Execute a tensor reduction over rank 2 (the 'W' dimension is reduced; NHWC => NHC) + TensorReduction reduction(src_tensor.extent(), 2); + + cutlass::DeviceAllocation device_workspace(reduction.workspace_size()); + + cutlass::Status status = reduction.reduce( + dst_tensor.device_ref(), + src_tensor.device_ref(), + device_workspace.get(), + reduction_identity + ); + + EXPECT_EQ(status, cutlass::Status::kSuccess); + EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess); + // Reference check + dst_tensor.sync_host(); + + typename TensorReduction::ReductionOp reduction_op; + + for (int n = 0; n < src_tensor.extent().n(); ++n) { + for (int h = 0; h < src_tensor.extent().h(); ++h) { + for (int c = 0; c < src_tensor.extent().c(); ++c) { + + ElementCompute w_accum = reduction_identity; + + for (int w = 0; w < src_tensor.extent().w(); ++w) { + w_accum = reduction_op(w_accum, ElementCompute(src_tensor.at({n, h, w, c}))); + } + + ElementCompute got = ElementCompute(dst_tensor.at({n, h, 0, c})); + + bool equal = (w_accum == got); + + EXPECT_TRUE(equal); + if (!equal) { + + std::cerr + << "Error at location (" << n << ", " << h << ", 0, " << c << ")" << std::endl; + + std::cerr + << " expected: " << w_accum << std::endl + << " got: " << got << std::endl; + + std::cerr + << "Problem: " << src_tensor.extent() << " -> " + << dst_tensor.extent() << std::endl; + + std::cerr + << " Grid: " << reduction.reduction_strided.grid_shape + << "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl + << " Final: " << reduction.reduction_strided.grid_final + << "\n Block: " << reduction.reduction_strided.threadblock_final << "\n"; + + return false; + } + } + } + } + } + } + } + } + + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor 
reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x8_f16x8) { + + int const kV = 8; + using ElementOutput = float; + using ElementSource = cutlass::half_t; + using ElementCompute = float; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x2_f16x2) { + + int const kV = 2; + using ElementOutput = float; + using ElementSource = cutlass::half_t; + using ElementCompute = float; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x1_f16x1) { + + int const kV = 1; + using ElementOutput = float; + using ElementSource = cutlass::half_t; + using ElementCompute = float; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_reduce_w_s32x4) { + + int const kV = 4; + using Element = int; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + Element, + Element, + Layout, + Functor, + kV, + Element 
+ >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_reduce_w_cf32) { + + int const kV = 1; + using ElementOutput = cutlass::complex; + using ElementSource = cutlass::complex; + using ElementCompute = cutlass::complex; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::plus; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_maximum_w_cf32) { + + int const kV = 1; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::maximum; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w( -std::numeric_limits::max() )); +} + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_minimum_w_cf32) { + + int const kV = 1; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::minimum; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w(std::numeric_limits::max())); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC 
+TEST(Reduction_TensorReduce, nhwc_XOR_w_u32) { + + int const kV = 1; + using ElementOutput = int; + using ElementSource = int; + using ElementCompute = int; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::bit_xor; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_AND_w_s32) { + + int const kV = 1; + using ElementOutput = unsigned; + using ElementSource = unsigned; + using ElementCompute = unsigned; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::bit_and; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w(0xffffffff)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_OR_w_u32) { + + int const kV = 1; + using ElementOutput = int; + using ElementSource = int; + using ElementCompute = int; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::bit_or; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_ANY_w_s32) { + + int const kV = 1; + using ElementOutput = int; + using 
ElementSource = int; + using ElementCompute = int; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::logical_or; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w(ElementCompute(0))); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_ALL_w_s32) { + + int const kV = 1; + using ElementOutput = int; + using ElementSource = int; + using ElementCompute = int; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::logical_and; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w(ElementCompute(1))); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_ANY_w_f32) { + + int const kV = 1; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::logical_or; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w(ElementCompute(0))); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Test tensor reduction from NHWC to NHC +TEST(Reduction_TensorReduce, nhwc_ALL_w_f32) { + + int const kV = 1; + using ElementOutput = float; + using ElementSource = float; + using ElementCompute = float; + 
using Layout = cutlass::layout::TensorNHWC; + + // Define the functor + using Functor = cutlass::logical_and; + + using TensorReduction = cutlass::reduction::device::TensorReduction< + ElementOutput, + ElementSource, + Layout, + Functor, + kV, + ElementCompute + >; + + EXPECT_TRUE(TestAllReduction_NHWC_reduce_w(ElementCompute(1))); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3cfbaa287f502337b06b2056b863f00e4d874693 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/CMakeLists.txt @@ -0,0 +1,32 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_reduction_kernel + reduce_splitk.cu + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/reduce_splitk.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/reduce_splitk.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f36d62a7e30dcbe487ab7e209f93b648780ef58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/reduce_splitk.cu @@ -0,0 +1,388 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests for device-wide GEMM interface +*/ + +#include + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/reduction/kernel/reduce_split_k.h" +#include "cutlass/reduction/thread/reduction_operators.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/tensor_view_io.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace reduction { + +template +__global__ void kernel_reduce_splitk(typename ReductionKernel::Params params) { + + __shared__ typename ReductionKernel::SharedStorage shared_storage; + + ReductionKernel reduction_op; + + reduction_op(params, shared_storage); +} + +template +class ReduceSplitKTestbed { +public: + + using ElementAccumulator = typename ReductionKernel::ElementAccumulator; + using ElementWorkspace = typename ReductionKernel::ElementWorkspace; + using ElementOutput = typename ReductionKernel::ElementOutput; + using Layout = cutlass::layout::RowMajor; + +public: + + cutlass::Distribution::Kind distribution_workspace; + cutlass::Distribution::Kind distribution_source; + uint64_t seed; + +public: + + /// Ctor + ReduceSplitKTestbed( + cutlass::Distribution::Kind distribution_workspace = cutlass::Distribution::Uniform, + cutlass::Distribution::Kind distribution_source = cutlass::Distribution::Uniform, + uint64_t seed = 2019 + ): + distribution_workspace(distribution_workspace), + distribution_source(distribution_source), + seed(seed) { + + } + + /// Helper to initialize a tensor view + template + bool initialize_tensor(cutlass::TensorView view, + cutlass::Distribution::Kind 
dist_kind, + uint64_t seed) { + + if (dist_kind == cutlass::Distribution::Uniform) { + cutlass::reference::host::TensorFillRandomUniform(view, seed, 8, -8, 0); + } + else if (dist_kind == cutlass::Distribution::Gaussian) { + cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1); + } else if (dist_kind == cutlass::Distribution::Identity) { + cutlass::reference::host::TensorFillIdentity(view); + } else if (dist_kind == cutlass::Distribution::Sequential) { + cutlass::reference::host::BlockFillSequential(view.data(), + view.capacity()); + } else { + EXPECT_TRUE(false) << "Not implemented"; + return false; + } + + return true; + } + + /// Runs a single problem size + bool run( + cutlass::MatrixCoord problem_size, + int partitions, + ElementAccumulator alpha = 1, + ElementAccumulator beta = 0) { + + cutlass::HostTensor workspace({ + problem_size.row() * partitions, + problem_size.column() + }); + + cutlass::HostTensor source(problem_size); + cutlass::HostTensor destination(problem_size); + cutlass::HostTensor destination_reference(problem_size, false); + + // + // Initialize + // + initialize_tensor(workspace.host_view(), distribution_workspace, seed); + initialize_tensor(source.host_view(), distribution_source, seed + 23); + + cutlass::reference::host::TensorFill(destination.host_view()); + + workspace.sync_device(); + source.sync_device(); + destination.sync_device(); + + // + // Launch reduction kernel + // + + dim3 block = ReductionKernel::block_shape(); + dim3 grid = ReductionKernel::grid_shape(problem_size); + + typename ReductionKernel::Params params( + problem_size, + partitions, + problem_size.row() * problem_size.column(), + workspace.device_ref(), + destination.device_ref(), + source.device_ref(), + {alpha, beta} + ); + + test::reduction::kernel_reduce_splitk<<< grid, block >>>(params); + + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) + << "CUDA error: " << cudaGetErrorString(result); + + 
destination.sync_host(); + + // + // Compute reference + // + + for (int m = 0; m < problem_size.row(); ++m) { + for (int n = 0; n < problem_size.column(); ++n) { + + ElementAccumulator accum = 0; + + for (int k = 0; k < partitions; ++k) { + accum += ElementAccumulator(workspace.at({m + k * problem_size.row(), n})); + } + + ElementAccumulator c = ElementAccumulator(source.at({m, n})); + + destination_reference.at({m, n}) = ElementOutput(accum * alpha + beta * c); + } + } + + // + // Compare + // + + EXPECT_GT(cutlass::reference::host::TensorNorm(destination.host_view()), 0); + EXPECT_GT(cutlass::reference::host::TensorNorm(destination_reference.host_view()), 0); + + bool passed = cutlass::reference::host::TensorEquals( + destination.host_view(), destination_reference.host_view()); + + EXPECT_TRUE(passed) + << "Workspace =\n" << workspace.host_view() << "\n\n" + << "\n" + << "Reference =\n" << destination_reference.host_view() << "\n\n" + << "Computed =\n" << destination.host_view() << "\n"; + + return passed; + } + + /// Runs through a variety of test cases + bool run_all() { + + cutlass::MatrixCoord problem_sizes[] = { + {8, 8}, + {136, 72}, + {248, 232}, + }; + + int partition_counts[] = { + 1,3,4,5,11 + }; + + bool passed = false; + + for (cutlass::MatrixCoord problem : problem_sizes) { + for (int partitions : partition_counts) { + passed = run(problem, partitions); + if (!passed) { + return false; + } + } + } + + return passed; + } +}; + +} // namespace reduction +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Strictly F32 data +// +TEST(Reduction_ReduceSplitK, f32_f32_f32_1_1x32) { + + using ElementWorkspace = float; + using ElementAccumulator = float; + using ElementOutput = float; + int const kN = 1; + using Shape = cutlass::MatrixShape<1, 32>; + + using OutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + kN, + ElementAccumulator, + ElementAccumulator + 
>; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + ElementWorkspace, + kN + >; + + using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< + Shape, + OutputOp, + ReductionOp + >; + + test::reduction::ReduceSplitKTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Vectorized access +// +TEST(Reduction_ReduceSplitK, f32_f32_f32_2_4x64) { + + using ElementWorkspace = float; + using ElementAccumulator = float; + using ElementOutput = float; + int const kN = 2; + using Shape = cutlass::MatrixShape<4, 64>; + + using OutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + kN, + ElementAccumulator, + ElementAccumulator + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + ElementWorkspace, + kN + >; + + using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< + Shape, + OutputOp, + ReductionOp + >; + + test::reduction::ReduceSplitKTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Vectorized access +// +TEST(Reduction_ReduceSplitK, f32_f32_f16_2_4x64) { + + using ElementWorkspace = float; + using ElementAccumulator = float; + using ElementOutput = cutlass::half_t; + int const kN = 2; + using Shape = cutlass::MatrixShape<4, 64>; + + using OutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + kN, + ElementAccumulator, + ElementAccumulator + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + ElementWorkspace, + kN + >; + + using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< + Shape, + OutputOp, + ReductionOp + >; + + test::reduction::ReduceSplitKTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Vectorized access +// +TEST(Reduction_ReduceSplitK, f32_f32_f16_8_4x64) { + + using ElementWorkspace = float; + using ElementAccumulator = float; + using ElementOutput = cutlass::half_t; + int const kN = 8; + using Shape = cutlass::MatrixShape<4, 64>; + + using OutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + kN, + ElementAccumulator, + ElementAccumulator + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + ElementWorkspace, + kN + >; + + using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< + Shape, + OutputOp, + ReductionOp + >; + + test::reduction::ReduceSplitKTestbed testbed; + + EXPECT_TRUE(testbed.run_all()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/reduce_splitk_testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/reduce_splitk_testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..78c720a05b92dff21ffdbf5c7c58c52eb2e15d67 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/kernel/reduce_splitk_testbed.h @@ -0,0 +1,45 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level Reduction +*/ + +#pragma once + +#include "cutlass/reduction/thread/reduce.h" + +#include "cutlass/layout/vector.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..12de48f818a66c0396ab0235c428a20b1cb4c7fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_reduction_thread + reduction_thread.cu + testbed.h + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/reduction_thread.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/reduction_thread.cu new file mode 100644 index 0000000000000000000000000000000000000000..be92feab3ea4bc23ee2036d578a40c800b818bdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/reduction_thread.cu @@ -0,0 +1,100 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level Reduction +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "testbed.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +// + +TEST(Reduce_thread_device, Reduce_half_t_1) { + + test::reduction::thread::Testbed_reduce_device< + cutlass::half_t, + 1 + >().run(); +} + +TEST(Reduce_thread_device, Reduce_half_t_16) { + + test::reduction::thread::Testbed_reduce_device< + cutlass::half_t, + 16 + >().run(); +} + +TEST(Reduce_thread_device, Reduce_half_t_31) { + + test::reduction::thread::Testbed_reduce_device< + cutlass::half_t, + 31 + >().run(); +} + + +TEST(Reduce_thread_host, Reduce_float_1) { + + test::reduction::thread::Testbed_reduce_host< + float, + 1 + >().run(); +} + +TEST(Reduce_thread_host, Reduce_float_16) { + + test::reduction::thread::Testbed_reduce_host< + float, + 16 + >().run(); + +} + +TEST(Reduce_thread_host, Reduce_half_t_1) { + + test::reduction::thread::Testbed_reduce_host< + cutlass::half_t, + 1 + >().run(); +} + +TEST(Reduce_thread_host, Reduce_half_t_16) { + + test::reduction::thread::Testbed_reduce_host< + cutlass::half_t, + 16 + >().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/testbed.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/testbed.h new file mode 100644 index 0000000000000000000000000000000000000000..e0e38ed49de1514dc72b07390339e41abaf77483 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/reduction/thread/testbed.h @@ -0,0 +1,242 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Unit tests for thread-level Reduction +*/ + +#pragma once + +#include "cutlass/reduction/thread/reduce.h" + +#include "cutlass/layout/vector.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" + +namespace test { +namespace reduction { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the reduction +template < + /// Data type of elements + typename Element, + /// Number of elements + int N +> +struct Testbed_reduce_host { + + /// Thread-level reduction operator + using Reduce = cutlass::reduction::thread::Reduce< + cutlass::plus, + cutlass::Array + >; + + // + // Data members + // + + cutlass::Array tensor_in; + cutlass::Array reduced_tensor_computed; + cutlass::Array reduced_tensor_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed_reduce_host() { + tensor_in.clear(); + reduced_tensor_computed.clear(); + reduced_tensor_reference.clear(); + } + + /// Runs the test + bool run() { + + // + // initialize memory + // + + for(int i = 0; i < N; i++) + tensor_in.at(i) = Element(i); + + + Reduce reduce; + + cutlass::Array *out_ptr = &reduced_tensor_computed; + out_ptr[0] = reduce(tensor_in); + + // + // Reference implementation + // + Element e(0); + for (int i = 0; i < N; i++) + e = e + Element(i); + + reduced_tensor_reference.at(0) = e; + + // + // Verify equivalence + // + + // compare + bool passed = reduced_tensor_reference[0] == reduced_tensor_computed[0]; + + EXPECT_TRUE(passed) + << "Expected = " << float(reduced_tensor_reference.at(0)) << "\n\n" + << "Actual = " << float(reduced_tensor_computed.at(0)) << "\n\n" + << std::endl; + + return passed; + } +}; + + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Thread-level reduction kernel +template +__global__ void kernel_reduce(Element const *array_in, Element *result) { + + /// Thread-level reduction operator + using Reduce = cutlass::reduction::thread::Reduce< + cutlass::plus, + cutlass::Array + >; + + Reduce reduce; + + auto ptr_in = reinterpret_cast const *>(array_in); + auto result_ptr = reinterpret_cast *>(result); + auto in = *ptr_in; + result_ptr[0] = reduce(in); +} + + +/// Structure to compute the reduction +template < + /// Data type of elements + typename Element, + /// Number of elements + int N +> +struct Testbed_reduce_device { + + using Layout = cutlass::layout::PackedVectorLayout; + + // + // Data members + // + + cutlass::HostTensor tensor_in; + cutlass::HostTensor reduced_tensor_computed; + cutlass::HostTensor reduced_tensor_reference; + + // + // Methods + // + + /// Allocates workspace in device memory + Testbed_reduce_device() { + + tensor_in.reset(cutlass::make_Coord(N), true); + reduced_tensor_computed.reset(cutlass::make_Coord(1), true); + reduced_tensor_reference.reset(cutlass::make_Coord(1), true); + } + + + /// Runs the test + bool run() { + + // + // initialize memory + // + + cutlass::reference::host::TensorFill( + tensor_in.host_view(), + Element(1) + ); + + cutlass::reference::host::TensorFill( + reduced_tensor_computed.host_view(), + Element(0) + ); + + cutlass::reference::host::TensorFill( + reduced_tensor_reference.host_view(), + Element(N) + ); + + tensor_in.sync_device(); + reduced_tensor_computed.sync_device(); + reduced_tensor_reference.sync_device(); + + /// call the kernel + kernel_reduce<<< dim3(1, 1), dim3(1, 1, 1) >>> ( + tensor_in.device_data(), + reduced_tensor_computed.device_data() + ); + + // verify no errors + cudaError_t result = cudaDeviceSynchronize(); + + EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); + if (result != cudaSuccess) 
{ + return false; + } + + // Copy back results + reduced_tensor_computed.sync_host(); + + // Verify equivalence + bool passed = cutlass::reference::host::TensorEquals( + reduced_tensor_computed.host_view(), + reduced_tensor_reference.host_view() + ); + + EXPECT_TRUE(passed) + << "Expected = " << reduced_tensor_reference.host_view() << "\n\n" + << "Actual = " << reduced_tensor_computed.host_view() << "\n\n" + << std::endl; + + return passed; + } +}; + +} // namespace thread +} // namespace reduction +} // namespace test diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/substrate/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/substrate/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1ae791c42b0e693da95f47c7894866a89179c90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/substrate/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_substrate + + dependent_false.cpp +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/substrate/dependent_false.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/substrate/dependent_false.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e32e946aceadd7eb7e400ae55a5f9d69a5b781e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/substrate/dependent_false.cpp @@ -0,0 +1,88 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#include "cutlass_unit_test.h" + +#include +#include "cutlass/detail/dependent_false.hpp" + +namespace { // (anonymous) + +template +void test_dependent_bool_value() +{ + static_assert(cutlass::detail::dependent_bool_value == true); + static_assert(cutlass::detail::dependent_bool_value == false); +} + +template +void test_dependent_false() +{ + static_assert(cutlass::detail::dependent_false == false); +} + +template +void test_all() +{ + test_dependent_bool_value(); + test_dependent_false(); +} + +// Types to use in Args +struct Type0 {}; +struct Type1 {}; +struct Type2 {}; + +} // end namespace (anonymous) + +TEST(LibcudacxxNext, DependentBoolValue) +{ + CUTLASS_TRACE_HOST("-------------------------------"); + CUTLASS_TRACE_HOST("dependent_bool_value"); + CUTLASS_TRACE_HOST("-------------------------------"); + + test_dependent_bool_value(); + test_dependent_bool_value(); + test_dependent_bool_value(); + test_dependent_bool_value(); +} + +TEST(LibcudacxxNext, DependentFalse) +{ + CUTLASS_TRACE_HOST("-------------------------------"); + CUTLASS_TRACE_HOST("dependent_false"); + CUTLASS_TRACE_HOST("-------------------------------"); + + test_dependent_false(); + test_dependent_false(); + test_dependent_false(); + test_dependent_false(); +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..cf5ee6163730fe20c306f1c6bcb57bd6a2c78d54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/CMakeLists.txt @@ -0,0 +1,41 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +add_subdirectory(threadblock) + +add_custom_target( + cutlass_test_unit_transform + DEPENDS + cutlass_test_unit_transform_threadblock + ) + +add_custom_target( + test_unit_transform + DEPENDS + test_unit_transform_threadblock + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..967aa6e803a4ee8ad82173ce239875d3c459bd86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_transform_threadblock + regular_tile_iterator_tensor_op.cu + predicated_tile_iterator.cu + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/predicated_tile_iterator.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/predicated_tile_iterator.cu new file mode 100644 index 0000000000000000000000000000000000000000..e30986bf4a0819965c6657133330257ed448b3d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/predicated_tile_iterator.cu @@ -0,0 +1,798 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Tests cutlass::transform::threadblock::PredicatedTileIterator +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/cutlass.h" + +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" + +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/tensor_fill.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace transform { +namespace threadblock { +namespace kernel { + +/// Copy with an iterator +template +__global__ void copy( + typename Iterator::Params dst_params, + typename Iterator::Element *dst_pointer, + typename Iterator::Params src_params, + typename Iterator::Element *src_pointer, + cutlass::Coord<2> extent) { + + Iterator dst_iterator(dst_params, dst_pointer, extent, threadIdx.x); + Iterator src_iterator(src_params, src_pointer, extent, threadIdx.x); + + int iterations = (extent[1] + Iterator::Shape::kStrided - 1) / Iterator::Shape::kStrided; + + typename Iterator::Fragment frag; + + for(int i = 0; i < frag.size(); i++) + frag[i] = 0; + + src_iterator.load(frag); + dst_iterator.store(frag); + + ++dst_iterator; + ++src_iterator; + + for (; iterations > 1; --iterations) { + + src_iterator.load(frag); + dst_iterator.store(frag); + + ++dst_iterator; + ++src_iterator; + } +} + +} // namespace kernel +} // namespace threadblock +} // namespace transform +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined) { + + using Shape = cutlass::layout::PitchLinearShape<64, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int; + static int const kThreads = 32; + + using ThreadMap = 
cutlass::transform::PitchLinearStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator< + Shape, Element, Layout, 1, ThreadMap + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(57, 35); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 35); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity()); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]; ++s) { + for (int c = 0; c < alloc_extent[0]; ++c) { + + Element expected = Element(0); + + if (c < copy_extent[0] && s < copy_extent[1]) { + expected = src_tensor.at({c, s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({c, s}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_128x4) { + + using Shape = cutlass::layout::PitchLinearShape<128, 4>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 
4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap, false + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(128, 4); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(128, 4); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity()); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]; ++s) { + for (int c = 0; c < alloc_extent[0]; ++c) { + + Element expected = Element(0); + + if (c < copy_extent[0] && s < copy_extent[1]) { + expected = src_tensor.at({c, s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({c, s}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + 
+TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_128x64) { + + using Shape = cutlass::layout::PitchLinearShape<128, 64>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(128, 64); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(128, 64); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity()); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]; ++s) { + for (int c = 0; c < alloc_extent[0]; ++c) { + + Element expected = Element(0); + + if (c < copy_extent[0] && s < copy_extent[1]) { + expected = src_tensor.at({c, s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({c, s}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << 
dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x64) { + + using Shape = cutlass::layout::PitchLinearShape<64, 64>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 64); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 64); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity()); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]; ++s) { + for (int c = 0; c < alloc_extent[0]; ++c) { + + Element expected = Element(0); + + if (c < copy_extent[0] && s < copy_extent[1]) { + expected = src_tensor.at({c, s}); + } + else { + expected = oob_value; + } + + Element got = 
dst_tensor.at({c, s}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x8) { + + using Shape = cutlass::layout::PitchLinearShape<64, 8>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(32, 8); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 8); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity()); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]; ++s) { + for (int c = 0; c < alloc_extent[0]; ++c) { + + Element expected = 
Element(0); + + if (c < copy_extent[0] && s < copy_extent[1]) { + expected = src_tensor.at({c, s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({c, s}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x32_transpose4x4) { + + using Shape = cutlass::layout::PitchLinearShape<64, 8>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap, true + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 32); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 32); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + uint64_t seed = 7; + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << 
" - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]/4; ++s) { + for (int c = 0; c < alloc_extent[0]/4; ++c) { + for (int s1 = 0; s1 < 4; s1++){ + for(int c1 = 0; c1 < 4; c1++){ + Element expected = Element(0); + + int l_c = c * 4 + c1; + int l_s = s * 4 + s1; + + int l_tc = c * 4 + s1; + int l_ts = s * 4 + c1; + + if (l_c < copy_extent[0] && l_s < copy_extent[1]) { + expected = src_tensor.at({l_c, l_s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({l_tc, l_ts}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x29_transpose4x4) { + + using Shape = cutlass::layout::PitchLinearShape<64, 8>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap, true + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 29); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 29); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + uint64_t seed = 7; + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params 
dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]/4; ++s) { + for (int c = 0; c < alloc_extent[0]/4; ++c) { + for (int s1 = 0; s1 < 4; s1++){ + for(int c1 = 0; c1 < 4; c1++){ + Element expected = Element(0); + + int l_c = c * 4 + c1; + int l_s = s * 4 + s1; + + int l_tc = c * 4 + s1; + int l_ts = s * 4 + c1; + + if (l_c < copy_extent[0] && l_s < copy_extent[1]) { + expected = src_tensor.at({l_c, l_s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({l_tc, l_ts}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_120x4_transpose4x4) { + + using Shape = cutlass::layout::PitchLinearShape<128, 4>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap, true + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(120, 4); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(120, 4); + + cutlass::HostTensor 
src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + uint64_t seed = 7; + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]/4; ++s) { + for (int c = 0; c < alloc_extent[0]/4; ++c) { + for (int s1 = 0; s1 < 4; s1++){ + for(int c1 = 0; c1 < 4; c1++){ + Element expected = Element(0); + + int l_c = c * 4 + c1; + int l_s = s * 4 + s1; + + int l_tc = c * 4 + s1; + int l_ts = s * 4 + c1; + + if (l_c < copy_extent[0] && l_s < copy_extent[1]) { + expected = src_tensor.at({l_c, l_s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({l_tc, l_ts}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + + if (!equal) { + return; + } + } + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_48x29_transpose4x4) { + + using Shape = cutlass::layout::PitchLinearShape<64, 8>; + using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>; + using Layout = cutlass::layout::PitchLinear; + using Element = int8_t; + static int const 
kThreads = 32; + + using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap; + + using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + Shape, Element, Layout, 1, ThreadMap, true + >; + + cutlass::Coord<2> copy_extent = cutlass::make_Coord(48, 29); + cutlass::Coord<2> alloc_extent = cutlass::make_Coord(48, 29); + + cutlass::HostTensor src_tensor(alloc_extent); + cutlass::HostTensor dst_tensor(alloc_extent); + + Element oob_value = Element(-1); + uint64_t seed = 7; + cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); + cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0); + + dst_tensor.sync_device(); + src_tensor.sync_device(); + + typename Iterator::Params dst_params(dst_tensor.layout()); + typename Iterator::Params src_params(src_tensor.layout()); + + dim3 block(kThreads, 1); + dim3 grid(1, 1); + + test::transform::threadblock::kernel::copy<<< grid, block >>>( + dst_params, + dst_tensor.device_data(), + src_params, + src_tensor.device_data(), + copy_extent + ); + + cudaError_t result = cudaGetLastError(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result); + + dst_tensor.sync_host(); + + for (int s = 0; s < alloc_extent[1]/4; ++s) { + for (int c = 0; c < alloc_extent[0]/4; ++c) { + for (int s1 = 0; s1 < 4; s1++){ + for(int c1 = 0; c1 < 4; c1++){ + Element expected = Element(0); + + int l_c = c * 4 + c1; + int l_s = s * 4 + s1; + + int l_tc = c * 4 + s1; + int l_ts = s * 4 + c1; + + if (l_c < copy_extent[0] && l_s < copy_extent[1]) { + expected = src_tensor.at({l_c, l_s}); + } + else { + expected = oob_value; + } + + Element got = dst_tensor.at({l_tc, l_ts}); + bool equal = (expected == got); + + EXPECT_EQ(expected, got) + << "Source:\n" << src_tensor.host_view() << "\n\n" + << "Destination:\n" << dst_tensor.host_view() << "\n"; + if (!equal) { + return; + } + } + } + } + } +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..c5ad3e98e162c2cda20f4037db0f021b7c4d429e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu @@ -0,0 +1,289 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief +*/ + +#include "../../common/cutlass_unit_test.h" + +#include "cutlass/cutlass.h" +#include "cutlass/core_io.h" +#include "cutlass/layout/pitch_linear.h" + +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/tensor_fill.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace test { +namespace gemm { +namespace threadblock { + +/// +template +__global__ void kernel_gemm_threadblock_tensor_op_multiplicand_store( + typename Iterator::TensorRef ref_output, + typename Iterator::Element *input) { + + // Construct fragment + typename Iterator::Fragment frag; + + frag.clear(); + + // each thread loads a fragment + using AccessType = cutlass::Array; + + int const kElementsPerAccess = Iterator::ThreadMap::kElementsPerAccess; + int stride = Iterator::Shape::kContiguous; + + int warp_id = (threadIdx.x / 32); + int lane_id = (threadIdx.x % 32); + + input += (lane_id % 8) * kElementsPerAccess + (lane_id / 8) * stride; + + input += (warp_id * Iterator::Shape::kStrided / Iterator::ThreadMap::Detail::kWarpCount) * stride; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < 
Iterator::ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Iterator::ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < Iterator::ThreadMap::kElementsPerAccess; ++v) { + frag[v + Iterator::ThreadMap::kElementsPerAccess * (c + s * Iterator::ThreadMap::Iterations::kContiguous)] = + input[v + c * 64 + s * Iterator::ThreadMap::Delta::kStrided * stride]; + } + } + } + + // Use iterator to store results + Iterator iter(ref_output, threadIdx.x); + iter.store(frag); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Simple test environment +template < + typename Shape_, + int WarpCount +> +class MultiplicandTileIteratorTestbed { +public: + + // + // Define iterator + // + + using Shape = Shape_; + using Element = cutlass::half_t; + using Layout = cutlass::layout::TensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + static int const kAdvanceRank = 1; + static int const kThreads = 32 * WarpCount; + + using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< + Shape, + kThreads, + cutlass::layout::PitchLinearShape<8, 4>, + 128 / cutlass::sizeof_bits::value + >; + + using Iterator = cutlass::transform::threadblock::RegularTileIterator< + Shape, Element, Layout, kAdvanceRank, ThreadMap + >; + +public: + + // + // Members + // + + cutlass::HostTensor destination_tensor; + cutlass::HostTensor source_tensor; + + +public: + + MultiplicandTileIteratorTestbed(): + destination_tensor({Shape::kContiguous, Shape::kStrided}), + source_tensor({Shape::kContiguous, Shape::kStrided}) { + + } + + bool run() { + + cutlass::reference::host::BlockFillSequential( + source_tensor.host_data(), + source_tensor.capacity() + ); + + cutlass::reference::host::BlockFillSequential( + destination_tensor.host_data(), + destination_tensor.capacity(), + Element(0), + Element(0) + ); + + // + // Launch kernel + // + + dim3 grid(1,1); + dim3 
block(kThreads, 1); + + destination_tensor.sync_device(); + source_tensor.sync_device(); + + test::gemm::threadblock::kernel_gemm_threadblock_tensor_op_multiplicand_store<<< + grid, block + >>>( + destination_tensor.device_ref(), + source_tensor.device_data() + ); + + cudaError_t result = cudaDeviceSynchronize(); + EXPECT_EQ(result, cudaSuccess) << " - CUDA ERROR: " << cudaGetErrorString(result); + + destination_tensor.sync_host(); + + // + // Verify + // + + // Verify that its contents match the destination + int errors = 0; + for (int s = 0; s < Shape::kStrided; ++s) { + for (int c = 0; c < Shape::kContiguous; ++c) { + + if (errors >= 10) { + break; + } + + Element expected = source_tensor.at({c, s}); + Element got = destination_tensor.at({c, s}); + + bool passed = (expected == got); + if (!passed) { + ++errors; + } + } + } + + EXPECT_EQ(errors, 0) + << source_tensor.host_view() << "\n\n" << destination_tensor.host_view() << std::endl; + + return !errors; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace test + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x8_w1) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<64, 8>, 1>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w1) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<64, 16>, 1>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w2) { + + 
test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<64, 16>, 2>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x8_w1) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<128, 8>, 1>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x32_w4) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<64, 32>, 4>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w1) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<128, 32>, 1>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w4) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<128, 32>, 4>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w4) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<256, 32>, 4>().run(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w8) { + + test::gemm::threadblock::MultiplicandTileIteratorTestbed< + cutlass::layout::PitchLinearShape<256, 32>, 8>().run(); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..0e88b065bcc7d23aa01cf71ae3799a0dd0937696 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/CMakeLists.txt @@ -0,0 +1,34 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cutlass_test_unit_add_executable( + cutlass_test_unit_util + tensor_reduce.cu + cutlass_test_levels.cu + rms_norm.cu + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/cutlass_test_levels.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/cutlass_test_levels.cu new file mode 100644 index 0000000000000000000000000000000000000000..38797839d75458e4bd3c48fc9ce17b6f4f1270ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/cutlass_test_levels.cu @@ -0,0 +1,77 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#include + +#include "../common/cutlass_unit_test.h" + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(SM75_CUTLASS_TEST, level_not_specified) { + + EXPECT_TRUE(true); +} + +TEST(SM80_CUTLASS_TEST, level_not_specified) { + + EXPECT_TRUE(true); +} + +CUTLASS_TEST_L0(SM75_CUTLASS_TEST, level0, { + + EXPECT_TRUE(true); +}) + +CUTLASS_TEST_L1(SM75_CUTLASS_TEST, level1, { + + EXPECT_TRUE(true); +}) + +CUTLASS_TEST_L2(SM75_CUTLASS_TEST, level2, { + + EXPECT_TRUE(true); +}) + +CUTLASS_TEST_L0(SM80_CUTLASS_TEST, level0, { + + EXPECT_TRUE(true); +}) + +CUTLASS_TEST_L1(SM80_CUTLASS_TEST, level1, { + + EXPECT_TRUE(true); +}) + +CUTLASS_TEST_L2(SM80_CUTLASS_TEST, level2, { + + EXPECT_TRUE(true); +}) +//////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/rms_norm.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/rms_norm.cu new file mode 100644 index 0000000000000000000000000000000000000000..a3e6595daebff8621cb3569bb1260a464ff5e817 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/rms_norm.cu @@ -0,0 +1,123 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#include "../common/cutlass_unit_test.h" + +#include "cutlass/util/device_rmsnorm.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/constants.h" +#include "cutlass/util/reference/host/tensor_copy.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_compare.h" + +using ElementType = cutlass::half_t; +using Layout = cutlass::layout::RowMajor; + +void rmsnorm_host(cutlass::MatrixCoord tensor_size, + cutlass::TensorRef output, + cutlass::TensorRef input, + cutlass::TensorRef weight) { + const int M = tensor_size.row(); + const int N = tensor_size.column(); + + for (int m = 0; m < M; ++m) { + float square_sum{0}; + + for (int n = 0; n < N; ++n) { + float inp = static_cast(input.at({m, n})); + square_sum += inp * inp; + } + + float sq_mean = square_sum / (float)N; + float sqrt_var = cutlass::fast_sqrt(sq_mean + (float)1e-6); + + for (int n = 0; n < N; ++n) { + float inp = static_cast(input.at({m, n})); + float g = static_cast(weight.at({0, n})); + float res_fp32 = inp / sqrt_var * g; + output.at({m, n}) = ElementType(res_fp32); + } + } +} + +void run_test(int M, int N) { + cutlass::HostTensor input, output_ref, output, weight; + input.reset({M, N}); + output.reset({M, N}); + output_ref.reset({M, N}); + weight.reset({1, N}); + + const unsigned seed = 2022; + + 
cutlass::reference::host::TensorFillRandomUniform(input.host_view(), + seed, + ElementType(5), + ElementType(-5), + 0); + + cutlass::reference::host::TensorFillRandomUniform(weight.host_view(), + seed, + ElementType(5), + ElementType(-5), + 0); + + input.sync_device(); + weight.sync_device(); + + rmsnorm_host({M, N}, output_ref.host_ref(), input.host_ref(), weight.host_ref()); + cutlass::rmsnorm({M, N}, output.device_ref(), + input.device_ref(), weight.device_ref(), NULL); + + output.sync_host(); + + float max_abs_diff = -1; + float mean_abs_diff = 0; + for (int m = 0; m < M; ++m) { + for (int n = 0; n < N; ++n) { + auto diff = abs(static_cast(output_ref.at({m, n}) - output.at({m, n}))); + mean_abs_diff += diff; + max_abs_diff = max(max_abs_diff, diff); + } + } + + mean_abs_diff /= float(M * N); + + EXPECT_TRUE(max_abs_diff < 0.001f && mean_abs_diff < 0.001f) + << "Max absolute difference : " << max_abs_diff << "\n" + << "Mean absolute difference: " << mean_abs_diff; +} + +TEST(RMSNorm, 16x1024) { + run_test(16, 1024); +} + +TEST(RMSNorm, 1x127) { + run_test(1, 127); +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/tensor_reduce.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/tensor_reduce.cu new file mode 100644 index 0000000000000000000000000000000000000000..c71d080ed4109534ce2dffcb1a63eb0f22d4ce23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/test/unit/util/tensor_reduce.cu @@ -0,0 +1,244 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#include + +#include "../common/cutlass_unit_test.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" + +#include "cutlass/util/reference/device/tensor_reduce.h" +#include "cutlass/util/reference/host/tensor_norm.h" +#include "cutlass/util/host_tensor.h" + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(TensorReduce, norm_rowmajor_f32) { + + int const kM = 129; + int const kN = 91; + + cutlass::HostTensor tensor({kM, kN}); + + for (int m = 0; m < kM; ++m) { + for (int n = 0; n < kN; ++n) { + + float x = float(((m * kN + m + 7) % 8) - 4); + + tensor.at({m, n}) = x; + } + } + + tensor.sync_device(); + + double device_norm = cutlass::reference::device::TensorNorm(tensor.device_view(), double()); + double host_norm = cutlass::reference::host::TensorNorm(tensor.host_view(), double()); + + EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001); +} + +TEST(TensorReduce, norm_nhwc_f32) { + + int const kN = 19; + int const kH = 18; + int const kW = 17; + int const kC = 16; + + cutlass::HostTensor tensor({kN, kH, kW, kC}); + + int idx = 0; + + double computed_norm = double(); + + for (int n = 0; n < kN; ++n) { + for (int h = 0; h < kH; ++h) { + for (int w = 0; w < kW; ++w) { + for (int c = 0; c < kC; ++c, ++idx) { + + float x = float(((idx + 7) % 8) - 4); + + computed_norm += double(x) * double(x); + + tensor.at({n, h, w, c}) = x; + } + } + } + } + + computed_norm = std::sqrt(computed_norm); + + tensor.sync_device(); + + double device_norm = cutlass::reference::device::TensorNorm(tensor.device_view(), double()); + double host_norm = cutlass::reference::host::TensorNorm(tensor.host_view(), double()); + + EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001 && std::abs(computed_norm - host_norm) < 0.001) + << "computed norm: " << computed_norm << "\n" + << " host norm: " << host_norm 
<< "\n" + << "device norm: " << device_norm << "\n"; +} + +TEST(TensorReduce, norm_nhwc_f16) { + + int const kN = 69; + int const kH = 68; + int const kW = 67; + int const kC = 66; + + cutlass::HostTensor tensor({kN, kH, kW, kC}); + + int idx = 0; + + double computed_norm = double(); + + for (int n = 0; n < kN; ++n) { + for (int h = 0; h < kH; ++h) { + for (int w = 0; w < kW; ++w) { + for (int c = 0; c < kC; ++c, ++idx) { + + float x = float(((idx + 7) % 8) - 4); + computed_norm += double(x) * double(x); + + tensor.at({n, h, w, c}) = cutlass::half_t(x); + } + } + } + } + + computed_norm = std::sqrt(computed_norm); + + tensor.sync_device(); + + double device_norm = cutlass::reference::device::TensorNorm(tensor.device_view(), double()); + double host_norm = cutlass::reference::host::TensorNorm(tensor.host_view(), double()); + + EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001 && std::abs(computed_norm - host_norm) < 0.001) + << "computed norm: " << computed_norm << "\n" + << " host norm: " << host_norm << "\n" + << "device norm: " << device_norm << "\n"; +} + +TEST(TensorReduce, norm_diff_nhwc_f32) { + + int const kN = 59; + int const kH = 24; + int const kW = 57; + int const kC = 78; + + using Layout = cutlass::layout::TensorNHWC; + + cutlass::HostTensor tensor_A({kN, kH, kW, kC}); + cutlass::HostTensor tensor_B({kN, kH, kW, kC}); + + + int idx = 0; + + double sum_sq_diff = 0; + + for (int n = 0; n < kN; ++n) { + for (int h = 0; h < kH; ++h) { + for (int w = 0; w < kW; ++w) { + for (int c = 0; c < kC; ++c, ++idx) { + + float a = float(((idx * 5 + 7) % 8) - 4); + float b = float(((idx * 3 + 7) % 8) - 4); + + sum_sq_diff += double(a - b) * double(a - b); + + tensor_A.at({n, h, w, c}) = a; + tensor_B.at({n, h, w, c}) = b; + } + } + } + } + + tensor_A.sync_device(); + tensor_B.sync_device(); + + double device_norm = cutlass::reference::device::TensorNormDiff( + tensor_A.device_view(), tensor_B.device_view(), double()); + + double host_norm = std::sqrt(sum_sq_diff); 
+ + EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001f) + << " host norm: " << host_norm << "\n" + << "device norm: " << device_norm; +} + + +TEST(TensorReduce, norm_diff_nhwc_f16) { + + int const kN = 59; + int const kH = 24; + int const kW = 57; + int const kC = 78; + + using Layout = cutlass::layout::TensorNHWC; + + cutlass::HostTensor tensor_A({kN, kH, kW, kC}); + cutlass::HostTensor tensor_B({kN, kH, kW, kC}); + + int idx = 0; + + double sum_sq_diff = 0; + + for (int n = 0; n < kN; ++n) { + for (int h = 0; h < kH; ++h) { + for (int w = 0; w < kW; ++w) { + for (int c = 0; c < kC; ++c, ++idx) { + + float a = float(((idx * 5 + 7) % 8) - 4); + float b = float(((idx * 3 + 7) % 8) - 4); + + sum_sq_diff += double(a - b) * double(a - b); + + tensor_A.at({n, h, w, c}) = cutlass::half_t(a); + tensor_B.at({n, h, w, c}) = cutlass::half_t(b); + } + } + } + } + + tensor_A.sync_device(); + tensor_B.sync_device(); + + double device_norm = cutlass::reference::device::TensorNormDiff( + tensor_A.device_view(), tensor_B.device_view(), double()); + + double host_norm = std::sqrt(sum_sq_diff); + + EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001f) + << " host norm: " << host_norm << "\n" + << "device norm: " << device_norm; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d240bc4e2a8e128e1be9026dd163de01dee06dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/CMakeLists.txt @@ -0,0 +1,46 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +cmake_policy(SET CMP0112 NEW) + +add_subdirectory(util) + +if (CUTLASS_ENABLE_LIBRARY) + add_subdirectory(library) +endif() + +if (CUTLASS_ENABLE_PROFILER) + if (NOT CUTLASS_ENABLE_LIBRARY) + message(SEND_ERROR "Build conflict: The CUTLASS profiler requires the CUTLASS library.") + message(SEND_ERROR " CUTLASS_ENABLE_PROFILER = ${CUTLASS_ENABLE_PROFILER}") + message(SEND_ERROR " CUTLASS_ENABLE_LIBRARY = ${CUTLASS_ENABLE_LIBRARY}") + else() + add_subdirectory(profiler) + endif() +endif() + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..a11ebcf6d29cc5dca01d068dab2c7fa9b794b800 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/CMakeLists.txt @@ -0,0 +1,303 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +cmake_policy(SET CMP0112 NEW) + +include(GNUInstallDirs) + +################################################################################ + +set(CUTLASS_BUILD_MONO_LIBRARY OFF CACHE BOOL + "Determines whether the cutlass library is generated as a single file or multiple files.") + +################################################################################ + +add_library(cutlass_library_includes INTERFACE) +add_library(nvidia::cutlass::library::includes ALIAS cutlass_library_includes) +set_target_properties(cutlass_library_includes PROPERTIES EXPORT_NAME library::includes) + +target_include_directories( + cutlass_library_includes + INTERFACE + $ + $ + ) + +target_link_libraries( + cutlass_library_includes + INTERFACE + CUTLASS + cutlass_tools_util_includes + ) + +install( + TARGETS cutlass_library_includes + EXPORT NvidiaCutlass + ) + +install( + DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/ + ) + +add_library(cutlass_library_internal_interface INTERFACE) +add_library(nvidia::cutlass::library::obj_interface ALIAS cutlass_library_internal_interface) + +target_include_directories( + cutlass_library_internal_interface + INTERFACE + $ + $ + 
) + +target_link_libraries( + cutlass_library_internal_interface + INTERFACE + cutlass_library_includes + ) + +################################################################################ + +function(cutlass_add_cutlass_library) +# +# Generates static and shared libraries with the given SOURCES. The public CMake +# targets produces will be cutlass_library(_${SUFFIX})? and +# cutlass_library(_${SUFFIX})?_static. +# +# SUFFIX: An additional string to be joined to the default names. If suffix is given, +# the generated libraries will be linked as a dependency of the main cutlass library. + + set(options) + set(oneValueArgs SUFFIX) + set(multiValueArgs) + cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + set(DEFAULT_NAME cutlass_library) + + set(__NAME ${DEFAULT_NAME}) + set(__OUTPUT_NAME cutlass) + set(__EXPORT_NAME library) + + if (__SUFFIX) + string(APPEND __NAME _${__SUFFIX}) + string(APPEND __OUTPUT_NAME _${__SUFFIX}) + string(APPEND __EXPORT_NAME _${__SUFFIX}) + endif() + + cutlass_add_library( + ${__NAME}_objs + OBJECT + ${__UNPARSED_ARGUMENTS} + ) + + target_link_libraries(${__NAME}_objs + PUBLIC cutlass_library_includes + PRIVATE cutlass_library_internal_interface + ) + + if (CUTLASS_BUILD_MONO_LIBRARY AND __SUFFIX) + + # If we're only building a single monolithic library then we + # simply link the generated object files to the default library. 
+ + target_link_libraries(${DEFAULT_NAME} PRIVATE $) + target_link_libraries(${DEFAULT_NAME}_static PRIVATE $) + + else() + + cutlass_add_library( + ${__NAME} + SHARED + EXPORT_NAME ${__EXPORT_NAME} + "" + ) + + set_target_properties( + ${__NAME} + PROPERTIES + OUTPUT_NAME ${__OUTPUT_NAME} + WINDOWS_EXPORT_ALL_SYMBOLS 1 + ) + + target_link_libraries( + ${__NAME} + PUBLIC cutlass_library_includes + PRIVATE $ + cuda_driver + ) + + set_target_properties(${__NAME} PROPERTIES DEBUG_POSTFIX "${CUTLASS_LIBRARY_DEBUG_POSTFIX}") + + cutlass_add_library( + ${__NAME}_static + STATIC + EXPORT_NAME ${__EXPORT_NAME}_static + "" + ) + + if (WIN32) + set(STATIC_OUTPUT_NAME ${__OUTPUT_NAME}.static) + else() + set(STATIC_OUTPUT_NAME ${__OUTPUT_NAME}) + endif() + + set_target_properties( + ${__NAME}_static + PROPERTIES + OUTPUT_NAME ${STATIC_OUTPUT_NAME} + WINDOWS_EXPORT_ALL_SYMBOLS 1 + ) + + target_link_libraries( + ${__NAME}_static + PUBLIC cutlass_library_includes + PRIVATE $ + cuda_driver + ) + + set_target_properties(${__NAME}_static PROPERTIES DEBUG_POSTFIX "${CUTLASS_LIBRARY_DEBUG_POSTFIX}") + + install( + TARGETS ${__NAME} ${__NAME}_static + EXPORT NvidiaCutlass + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + + if (__SUFFIX) + + # The partial libraries generated will be registered as linked libraries + # to the main cutlass library so users automatically get the necessary link + # commands to pull in all kernels by default. 
+ + target_link_libraries(${DEFAULT_NAME} INTERFACE ${__NAME}) + target_link_libraries(${DEFAULT_NAME}_static INTERFACE ${__NAME}_static) + + endif() + + endif() + +endfunction() + +################################################################################ + +cutlass_add_cutlass_library( + + src/handle.cu + src/manifest.cpp + src/operation_table.cu + src/singleton.cu + src/util.cu + + # files split for parallel compilation + src/reference/gemm_int4.cu + src/reference/gemm_int8_canonical.cu + src/reference/gemm_int8_interleaved_32.cu + src/reference/gemm_int8_interleaved_64.cu + src/reference/gemm_e4m3a_e4m3out.cu + src/reference/gemm_e5m2a_e4m3out.cu + src/reference/gemm_e4m3a_e5m2out.cu + src/reference/gemm_e5m2a_e5m2out.cu + src/reference/gemm_fp8in_fp16out.cu + src/reference/gemm_fp8in_bf16out.cu + src/reference/gemm_fp8in_fp32out.cu + src/reference/gemm_fp32out.cu + src/reference/gemm_fp_other.cu + src/reference/initialize_reference_operations.cu + + # cutlass reduction instances in cutlass library + + src/reduction/reduction_device.cu + src/reduction/init_reduction_operations.cu + + # cutlass conv reference instances in cutlass library + + src/reference/conv2d.cu + src/reference/conv3d.cu + + ) + +# For backward compatibility with the old name +add_library(cutlass_lib ALIAS cutlass_library) + +################################################################################ + +file(GLOB_RECURSE GENERATOR_PYTHON_SOURCES CONFIGURE_DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/scripts/*.py) + +# +# auto-instantiation of CUTLASS kernels +# + +# set cutlass generator compiler version to filter kernels in the generator not supported by a specific toolkit. 
+set(CUTLASS_GENERATOR_CUDA_COMPILER_VERSION ${CMAKE_CUDA_COMPILER_VERSION}) +set(CUTLASS_LIBRARY_GENERATED_KERNEL_LIST_FILE ${CMAKE_CURRENT_BINARY_DIR}/generated_kernels.txt CACHE STRING "Generated kernel listing file") + +# --log-level is set to DEBUG to enable printing information about which kernels were excluded +# from generation in /python/cutlass_library/manifest.py. To avoid having this information appear +# in ${CMAKE_CURRENT_BINARY_DIR}/library_instance_generation.log, set this parameter to INFO +execute_process( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../python/cutlass_library + COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/../../python/cutlass_library/generator.py + --operations "${CUTLASS_LIBRARY_OPERATIONS}" + --build-dir ${PROJECT_BINARY_DIR} + --curr-build-dir ${CMAKE_CURRENT_BINARY_DIR} + --generator-target library + --architectures "${CUTLASS_NVCC_ARCHS_ENABLED}" + --kernels "${CUTLASS_LIBRARY_KERNELS}" + --ignore-kernels "${CUTLASS_LIBRARY_IGNORE_KERNELS}" + --selected-kernel-list "${CUTLASS_LIBRARY_GENERATED_KERNEL_LIST_FILE}" + --cuda-version "${CUTLASS_GENERATOR_CUDA_COMPILER_VERSION}" + --log-level DEBUG + RESULT_VARIABLE cutlass_lib_INSTANCE_GENERATION_RESULT + OUTPUT_VARIABLE cutlass_lib_INSTANCE_GENERATION_OUTPUT + OUTPUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/library_instance_generation.log + ERROR_FILE ${CMAKE_CURRENT_BINARY_DIR}/library_instance_generation.log +) + +if(NOT cutlass_lib_INSTANCE_GENERATION_RESULT EQUAL 0) + message(FATAL_ERROR "Error generating library instances. See ${CMAKE_CURRENT_BINARY_DIR}/library_instance_generation.log") +endif() + +message(STATUS "Completed generation of library instances. 
See ${CMAKE_CURRENT_BINARY_DIR}/library_instance_generation.log for more information.") + +# include auto-instantiated kernels in he CUTLASS Deliverables Library +set(CUTLASS_LIBRARY_MANIFEST_CMAKE_FILE ${CMAKE_CURRENT_BINARY_DIR}/generated/manifest.cmake) +if(EXISTS "${CUTLASS_LIBRARY_MANIFEST_CMAKE_FILE}") + include(${CUTLASS_LIBRARY_MANIFEST_CMAKE_FILE}) +else() + message(STATUS "auto-generated library manifest cmake file (${CUTLASS_LIBRARY_MANIFEST_CMAKE_FILE}) not found.") +endif() + +################################################################################ + +install( + FILES ${CUTLASS_LIBRARY_GENERATED_KERNEL_LIST_FILE} + DESTINATION ${CMAKE_INSTALL_INFODIR}/cutlass/ + ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/arch_mappings.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/arch_mappings.h new file mode 100644 index 0000000000000000000000000000000000000000..a48c173e926f93593e85c95c06ff1af32e6b70d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/arch_mappings.h @@ -0,0 +1,116 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + + \brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS. 
+ + Generally, + + description - compile-time constant parameters used to instantiate an operation + + configuration - runtime parameters with computationally expensive initialization + + arguments - runtime parameters that may be passed to an initialized operation with low + computational overhead +*/ + +#pragma once + +#include "cutlass/arch/mma.h" +#include "cutlass/arch/arch.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct ArchMap; + +template <> struct ArchMap { + static int const kMin = 50; + static int const kMax = 1024; +}; + +template <> struct ArchMap { + static int const kMin = 60; + static int const kMax = 1024; +}; + +template <> struct ArchMap { + static int const kMin = 61; + static int const kMax = 1024; +}; + +template <> struct ArchMap { + static int const kMin = 70; + static int const kMax = 1024; +}; + +template <> struct ArchMap { + static int const kMin = 70; + static int const kMax = 75; +}; + +template struct ArchMap { + static int const kMin = 75; + static int const kMax = 1024; +}; + +template struct ArchMap { + static int const kMin = 80; + static int const kMax = 1024; +}; + +template struct ArchMap { + static int const kMin = 86; + static int const kMax = 1024; +}; + +template struct ArchMap { + static int const kMin = 90; + static int const kMax = 1024; +}; + +// Arch conditional WGMMA +template <> struct ArchMap { + static int const kMin = 90; + static int const kMax = 90; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/descriptions.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/descriptions.h new file mode 100644 index 0000000000000000000000000000000000000000..e866996529cf9fb64f853d776ab52c3665bb5dac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/descriptions.h @@ -0,0 +1,601 @@ +/*************************************************************************************************** + * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include +#include +#include + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct MathInstructionDescription { + + /// Shape of the target math instruction + cutlass::gemm::GemmCoord instruction_shape; + + /// Describes the data type of the internal accumulator + NumericTypeID element_accumulator; + + /// Classification of math instruction + OpcodeClassID opcode_class; + + /// Type of math operation performed + MathOperationID math_operation; + + // + // Methods + // + + MathInstructionDescription( + cutlass::gemm::GemmCoord instruction_shape = cutlass::gemm::GemmCoord(), + NumericTypeID element_accumulator = NumericTypeID::kInvalid, + OpcodeClassID opcode_class = OpcodeClassID::kInvalid, + MathOperationID math_operation = MathOperationID::kMultiplyAdd + ): + instruction_shape(instruction_shape), + element_accumulator(element_accumulator), + opcode_class(opcode_class), + math_operation(math_operation) {} + + // Equality operator + inline + bool operator==(MathInstructionDescription const& rhs) const{ + return ( + (instruction_shape == rhs.instruction_shape) && + (element_accumulator == rhs.element_accumulator) && + 
(opcode_class == rhs.opcode_class) && + (math_operation == rhs.math_operation)); + } + + // Inequality operator + inline + bool operator!=(MathInstructionDescription const& rhs) const { + return !(*this == rhs); + } + +}; + +/// Structure describing the tiled structure of a GEMM-like computation +struct TileDescription { + + /// Describes the shape of a threadblock (in elements) + cutlass::gemm::GemmCoord threadblock_shape; + + /// Describes the number of pipeline stages in the threadblock-scoped mainloop + int threadblock_stages; + + /// Number of warps in each logical dimension + cutlass::gemm::GemmCoord warp_count; + + /// Core math instruction + MathInstructionDescription math_instruction; + + /// Minimum compute capability (e.g. 70, 75) of a device eligible to run the operation. + int minimum_compute_capability; + + /// Minimum compute capability (e.g. 70, 75) of a device eligible to run the operation. + int maximum_compute_capability; + + /// Describes the shape of a cluster (in blocks) + cutlass::gemm::GemmCoord cluster_shape; + + // + // Methods + // + + TileDescription( + cutlass::gemm::GemmCoord threadblock_shape = cutlass::gemm::GemmCoord(), + int threadblock_stages = 0, + cutlass::gemm::GemmCoord warp_count = cutlass::gemm::GemmCoord(), + MathInstructionDescription math_instruction = MathInstructionDescription(), + int minimum_compute_capability = 0, + int maximum_compute_capability = 0, + cutlass::gemm::GemmCoord cluster_shape = cutlass::gemm::GemmCoord(1,1,1) + ): + threadblock_shape(threadblock_shape), + threadblock_stages(threadblock_stages), + warp_count(warp_count), + math_instruction(math_instruction), + minimum_compute_capability(minimum_compute_capability), + maximum_compute_capability(maximum_compute_capability), + cluster_shape(cluster_shape) { } + + // Equality operator + inline + bool operator==(TileDescription const& rhs) const{ + return ( + (threadblock_shape == rhs.threadblock_shape) && + (threadblock_stages == rhs.threadblock_stages) && 
+ (warp_count == rhs.warp_count) && + (math_instruction == rhs.math_instruction) && + (minimum_compute_capability == rhs.minimum_compute_capability) && + (maximum_compute_capability == rhs.maximum_compute_capability)); + } + + // Inequality operator + inline + bool operator!=(TileDescription const& rhs) const { + return !(*this == rhs); + } +}; + +/// High-level description of an operation +struct OperationDescription { + + /// Unique identifier describing the operation + char const * name; + + /// Operation provider + Provider provider; + + /// Kind of operation + OperationKind kind; + + /// Describes the tiled structure of a GEMM-like computation + TileDescription tile_description; + + // + // Methods + // + OperationDescription( + char const * name = "unknown", + Provider provider = Provider::kInvalid, + OperationKind kind = OperationKind::kInvalid, + TileDescription const& tile_description = TileDescription() + ): + name(name), provider(provider), kind(kind), tile_description(tile_description) { } +}; + +/// Structure describing the properties of a tensor +struct TensorDescription { + + /// Numeric type of an individual element + NumericTypeID element; + + /// Enumerant identifying the layout function for the tensor + LayoutTypeID layout; + + /// Alignment restriction on pointers, strides, and extents + int alignment; + + /// log2() of the maximum extent of each dimension + int log_extent_range; + + /// log2() of the maximum value each relevant stride may have + int log_stride_range; + + // + // Methods + // + + TensorDescription( + NumericTypeID element = NumericTypeID::kInvalid, + LayoutTypeID layout = LayoutTypeID::kInvalid, + int alignment = 1, + int log_extent_range = 24, + int log_stride_range = 24 + ): + element(element), + layout(layout), + alignment(alignment), + log_extent_range(log_extent_range), + log_stride_range(log_stride_range) { } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// 
Description of all GEMM computations +struct GemmDescription : public OperationDescription { + + /// Indicates the kind of GEMM performed + GemmKind gemm_kind; + + /// Describes the A operand + TensorDescription A; + + /// Describes the B operand + TensorDescription B; + + /// Describes the source matrix + TensorDescription C; + + /// Describes the destination matrix + TensorDescription D; + + /// Describes the sparse meta matrices + TensorDescription E; + + /// Describes the data type of the scalars passed to the epilogue + NumericTypeID element_epilogue; + + /// Describes the structure of parallel reductions + SplitKMode split_k_mode; + + /// Transformation on A operand + ComplexTransform transform_A; + + /// Transformation on B operand + ComplexTransform transform_B; + + // + // Methods + // + + GemmDescription( + GemmKind gemm_kind = GemmKind::kGemm, + TensorDescription const& A = TensorDescription(), + TensorDescription const& B = TensorDescription(), + TensorDescription const& C = TensorDescription(), + TensorDescription const& D = TensorDescription(), + NumericTypeID element_epilogue = NumericTypeID::kInvalid, + SplitKMode split_k_mode = SplitKMode::kNone, + ComplexTransform transform_A = ComplexTransform::kNone, + ComplexTransform transform_B = ComplexTransform::kNone + ): + gemm_kind(gemm_kind), + A(A), + B(B), + C(C), + D(D), + element_epilogue(element_epilogue), + split_k_mode(split_k_mode), + transform_A(transform_A), + transform_B(transform_B) {} + + GemmDescription( + OperationDescription op_desc, + GemmKind gemm_kind, + TensorDescription const& A, + TensorDescription const& B, + TensorDescription const& C, + TensorDescription const& D, + NumericTypeID element_epilogue, + SplitKMode split_k_mode, + ComplexTransform transform_A, + ComplexTransform transform_B + ): + OperationDescription(op_desc), + gemm_kind(gemm_kind), + A(A), + B(B), + C(C), + D(D), + element_epilogue(element_epilogue), + split_k_mode(split_k_mode), + transform_A(transform_A), + 
transform_B(transform_B) {} +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Description for structured sparse GEMMs. +struct SparseGemmDescription : public GemmDescription { + + /// Description structure for structured sparse GEMM + SparseGemmDescription( + GemmKind gemm_kind = GemmKind::kGemm, + TensorDescription const& A = TensorDescription(), + TensorDescription const& B = TensorDescription(), + TensorDescription const& C = TensorDescription(), + TensorDescription const& D = TensorDescription(), + TensorDescription const& E = TensorDescription(), + NumericTypeID element_epilogue = NumericTypeID::kInvalid, + SplitKMode split_k_mode = SplitKMode::kNone, + ComplexTransform transform_A = ComplexTransform::kNone, + ComplexTransform transform_B = ComplexTransform::kNone + ): + GemmDescription(gemm_kind, A, B, C, D, element_epilogue, split_k_mode, transform_A, transform_B) + {this->E = E;} +}; + +/// Description of all Reduction operations +struct ReductionDescription : public OperationDescription { + + /// Describes the data type of workspace + NumericTypeID element_workspace; + + /// Describes the data type of final output + NumericTypeID element_output; + + /// Describes the data type of the scalars passed to the epilogue + NumericTypeID element_epilogue; +}; + +/// Description of all Rank K update computations (SYRK, HERK, SYR2K, HER2K) +struct RankKDescription : public OperationDescription { + + /// Indicates which device template is used (universal or regular) + RankKKind rank_k_kind; + + /// Number of rank update (rank k or rank 2k) + int num_ranks; + + /// Describes the A operand + TensorDescription A; + + /// Describes the B operand (used only for SYR2K and HER2K) + TensorDescription B; + + /// Describes the source and destination matrices + TensorDescription C; + + /// Describes the fill mode for matrix C + FillMode fill_mode; + + /// Describes the blas mode (symmetric/hermitian) + BlasMode 
blas_mode; + + /// Describes the data type of the scalars passed to the epilogue + NumericTypeID element_epilogue; + + /// Describes the structure of parallel reductions + SplitKMode split_k_mode; + + /// Transformation on A operand + ComplexTransform transform_A; + + /// Transformation on B operand + ComplexTransform transform_B; + + // + // Methods + // + + RankKDescription( + RankKKind rank_k_kind = RankKKind::kUniversal, + int num_ranks = 1, + TensorDescription const& A = TensorDescription(), + TensorDescription const& B = TensorDescription(), + TensorDescription const& C = TensorDescription(), + FillMode fill_mode = FillMode::kInvalid, + BlasMode blas_mode = BlasMode::kInvalid, + NumericTypeID element_epilogue = NumericTypeID::kInvalid, + SplitKMode split_k_mode = SplitKMode::kNone, + ComplexTransform transform_A = ComplexTransform::kNone, + ComplexTransform transform_B = ComplexTransform::kNone + ): + rank_k_kind(rank_k_kind), + num_ranks(num_ranks), + A(A), + B(B), + C(C), + fill_mode(fill_mode), + blas_mode(blas_mode), + element_epilogue(element_epilogue), + split_k_mode(split_k_mode), + transform_A(transform_A), + transform_B(transform_B) {} +}; +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Description of all TRMM computations +struct TrmmDescription : public OperationDescription { + + /// Indicates the kind of TRMM performed + TrmmKind trmm_kind; + + /// Describes the A operand + TensorDescription A; + + /// Describes the side mode for matrix A + SideMode side_mode; + + /// Describes the fill mode for matrix A + FillMode fill_mode; + + /// Describes the diag type for matrix A + DiagType diag_type; + + /// Describes the B operand + TensorDescription B; + + /// Describes the source and destination matrices + TensorDescription D; + + /// Describes the data type of the scalars passed to the epilogue + NumericTypeID element_epilogue; + + /// Describes the structure of parallel reductions + SplitKMode 
split_k_mode; + + /// Transformation on A operand + ComplexTransform transform_A; + + // + // Methods + // + + TrmmDescription( + TrmmKind trmm_kind = TrmmKind::kUniversal, + TensorDescription const& A = TensorDescription(), + SideMode side_mode = SideMode::kInvalid, + FillMode fill_mode = FillMode::kInvalid, + DiagType diag_type = DiagType::kInvalid, + TensorDescription const& B = TensorDescription(), + TensorDescription const& D = TensorDescription(), + NumericTypeID element_epilogue = NumericTypeID::kInvalid, + SplitKMode split_k_mode = SplitKMode::kNone, + ComplexTransform transform_A = ComplexTransform::kNone + ): + trmm_kind(trmm_kind), + A(A), + side_mode(side_mode), + fill_mode(fill_mode), + diag_type(diag_type), + B(B), + D(D), + element_epilogue(element_epilogue), + split_k_mode(split_k_mode), + transform_A(transform_A) {} +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Description of all SYMM/HEMM update computations +struct SymmDescription : public OperationDescription { + + /// Indicates which device template is used (universal or regular) + SymmKind symm_kind; + + /// Describes the A operand + TensorDescription A; + + /// Describes the B operand + TensorDescription B; + + /// Describes the source and destination matrices + TensorDescription C; + + /// Describes the side mode for matrix A + SideMode side_mode; + + /// Describes the fill mode for matrix A + FillMode fill_mode; + + /// Describes the blas mode (symmetric/hermitian) + BlasMode blas_mode; + + /// Describes the data type of the scalars passed to the epilogue + NumericTypeID element_epilogue; + + /// Describes the structure of parallel reductions + SplitKMode split_k_mode; + + /// Transformation on A operand + ComplexTransform transform_A; + + /// Transformation on B operand + ComplexTransform transform_B; + + // + // Methods + // + + SymmDescription( + SymmKind symm_kind = SymmKind::kUniversal, + TensorDescription const& A = 
TensorDescription(), + TensorDescription const& B = TensorDescription(), + TensorDescription const& C = TensorDescription(), + SideMode side_mode = SideMode::kInvalid, + FillMode fill_mode = FillMode::kInvalid, + BlasMode blas_mode = BlasMode::kInvalid, + NumericTypeID element_epilogue = NumericTypeID::kInvalid, + SplitKMode split_k_mode = SplitKMode::kNone, + ComplexTransform transform_A = ComplexTransform::kNone, + ComplexTransform transform_B = ComplexTransform::kNone + ): + symm_kind(symm_kind), + A(A), + B(B), + C(C), + side_mode(side_mode), + fill_mode(fill_mode), + blas_mode(blas_mode), + element_epilogue(element_epilogue), + split_k_mode(split_k_mode), + transform_A(transform_A), + transform_B(transform_B) {} +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Description of all Conv2d operations +struct ConvDescription : public OperationDescription { + /// Describes the convolution dimension support (2D or 3D) + int conv_dim; + + /// Describes the kind of convolution + ConvKind conv_kind; + + /// Describes the type of iterator algorithm (analytic or precomputed) + IteratorAlgorithmID iterator_algorithm; + + /// Describes the A operand + TensorDescription A; + + /// Describes the B operand + TensorDescription B; + + /// Describes the C operand + TensorDescription C; + + /// Describes the data type of the scalars passed to the epilogue + NumericTypeID element_epilogue; + + // + // Methods + // + // Returns Activation TensorDescription + TensorDescription activation() const { + switch(conv_kind) { + case library::ConvKind::kFprop : return A; + case library::ConvKind::kDgrad : return C; + case library::ConvKind::kWgrad : return B; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns Filter TensorDescription + TensorDescription filter() const { + 
switch(conv_kind) { + case library::ConvKind::kFprop : return B; + case library::ConvKind::kDgrad : return B; + case library::ConvKind::kWgrad : return C; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns Output TensorDescription + TensorDescription output() const { + switch(conv_kind) { + case library::ConvKind::kFprop : return C; + case library::ConvKind::kDgrad : return A; + case library::ConvKind::kWgrad : return A; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/handle.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/handle.h new file mode 100644 index 0000000000000000000000000000000000000000..93070f31cafbf5bbba1a68833b752264f6728295 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/handle.h @@ -0,0 +1,355 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief BLAS-like handle used to launch operations on the CUDA device. 
+*/ + +#pragma once + +#include +#include "cutlass/library/library.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Handle object +class Handle { +private: + + /// Host workspace + static int const kHostWorkspaceSize = (4 << 10); + + /// Provider of operations + Provider provider_; + + /// CUDA device properties + cudaDeviceProp device_; + + /// CUDA stream + cudaStream_t stream_; + + /// Device workspace + void *workspace_; + + /// Size of device workspace in bytes + size_t workspace_size_; + + /// Indicates whether scalars are host or device pointers + ScalarPointerMode scalar_pointer_mode_; + + /// Pointer to the most recently executed operation + Operation const *last_operation_; + +public: + + /// Constructor + Handle(cudaStream_t stream = nullptr, size_t workspace_size = (4<<20)); + + /// Destructor + ~Handle(); + + /// Move constructor + Handle(Handle && handle); + + /// Move assignment operator + Handle &operator=(Handle && handle); + + // + // Persistent state accessors + // + + /// Returns compute capability of the selected device + int compute_capability() const; + + /// Sets the current CUDA stream + void set_stream(cudaStream_t stream); + + /// Gets the current CUDA stream + cudaStream_t get_stream() const; + + /// Gets the current provider + Provider get_provider() const; + + /// Sets the provider of operations + void set_provider(Provider provider); + + /// Gets the device workspace size + size_t get_workspace_size() const; + + /// Gets a pointer to the device workspace allocation in Global Memory + void *get_workspace() const; + + /// Sets the size of device workspace, invalidating calls to get_device_workspace() + void set_workspace_size(size_t bytes); + + /// Gets the scalar pointer mode + ScalarPointerMode get_scalar_pointer_mode() const; + + 
/// Sets the scalar pointer mode + void set_scalar_pointer_mode(ScalarPointerMode mode); + + /// Gets the most recently executed operation + Operation const *get_last_operation() const; + + // + // Computations + // + + /// Executes a GEMM computation: D <= alpha * A*B + beta * C + Status gemm( + + int M, /// GEMM M dimension + int N, /// GEMM N dimension + int K, /// GEMM K dimension + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices + + void const * ptr_A, /// Pointer to A matrix in Global Memory + int64_t lda, /// Leading dimension of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices + + void const * ptr_B, /// Pointer to B matrix in Global Memory + int64_t ldb, /// Leading dimension of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C and D matrices + + void const * ptr_C, /// Pointer to C matrix + int64_t ldc, /// Leading dimension of C matrix + + void * ptr_D, /// Pointer to D matrix + int64_t ldd /// Leading dimension of D matrix + ); + + /// Executes a GEMM computation: D <= alpha * A*B + beta * C. + // + // Supports batched-strided, batched array or split-K serial or split-K parallel. 
+ // + Status gemm_universal( + + GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched + + int M, /// GEMM M dimension + int N, /// GEMM N dimension + int K, /// GEMM K dimension + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices + void const * ptr_A, /// Pointer to A matrix in Global Memory + int64_t lda, /// Leading dimension of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices + void const * ptr_B, /// Pointer to B matrix in Global Memory + int64_t ldb, /// Leading dimension of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C matrix + LayoutTypeID layout_C, /// Layout of D matrix + void const * ptr_C, /// Pointer to C matrix + int64_t ldc, /// Leading dimension of C matrix + + NumericTypeID element_D, /// Data type of D matrix + LayoutTypeID layout_D, /// Layout of D matrix + void * ptr_D, /// Pointer to D matrix + int64_t ldd, /// Leading dimension of D matrix + + int batch_count = 1, /// Batch count or number of split-K slices + + int64_t batch_stride_A = 0, /// Batch stride of A operand + int64_t batch_stride_B = 0, /// Batch stride of B operand + int64_t batch_stride_C = 0, /// Batch stride of C operand + int64_t batch_stride_D = 0 /// Batch stride of D operand + ); + + /// Planar complex GEMM + /// + /// Note, all data types are the real-valued base types used by the planar-complex GEMM kernel. 
+ /// + Status gemm_planar_complex( + + int M, /// GEMM M dimension + int N, /// GEMM N dimension + int K, /// GEMM K dimension + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix + + void const * ptr_A_real, /// Pointer to real part of A matrix + void const * ptr_A_imag, /// Pointer to imaginary part of A matrix + int64_t lda_real, /// Leading dimension of real part of A matrix + int64_t lda_imag, /// Leading dimension of imaginary part of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix + + void const * ptr_B_real, /// Pointer to real part of B matrix + void const * ptr_B_imag, /// Pointer to imaginary part of B matrix + int64_t ldb_real, /// Leading dimension of real part of B matrix + int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C and D matrix + + void const * ptr_C_real, /// Pointer to real part of C matrix + void const * ptr_C_imag, /// Pointer to imaginary part of C matrix + int64_t ldc_real, /// Leading dimension of real part of C matrix + int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix + + void * ptr_D_real, /// Pointer to real part of D matrix + void * ptr_D_imag, /// Pointer to imaginary part of D matrix + int64_t ldd_real, /// Leading dimension of real part of D matrix + int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix + + int batch_count = 1, /// Number of batched GEMMs to execute + + int64_t batch_stride_A_real = 
0, + int64_t batch_stride_A_imag = 0, + + int64_t batch_stride_B_real = 0, + int64_t batch_stride_B_imag = 0, + + int64_t batch_stride_C_real = 0, + int64_t batch_stride_C_imag = 0, + + int64_t batch_stride_D_real = 0, + int64_t batch_stride_D_imag = 0 + ); + + /// Planar complex GEMM loading pointers from arrays in global memory + Status gemm_planar_complex_array( + + int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid) + int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid) + int expected_K, /// Expected GEMM K dimension + int batch_count, /// Number of independent GEMM computations to execute + + int const *M, /// Array containing the GEMM M dimension for each batch index + int const *N, /// Array containing the GEMM N dimension for each batch index + int const *K, /// Array containing the GEMM K dimension for each batch index + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix + + void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices + void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices + + int64_t lda_real, /// Leading dimension of real part of A matrix + int64_t lda_imag, /// Leading dimension of imaginary part of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix + + void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices + void const * const * ptr_B_imag, /// Pointer to array containing pointers to 
imaginary part of B matrices + + int64_t ldb_real, /// Leading dimension of real part of B matrix + int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C and D matrix + + void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices + void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices + + int64_t ldc_real, /// Leading dimension of real part of C matrix + int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix + + void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices + void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices + + int64_t ldd_real, /// Leading dimension of real part of D matrix + int64_t ldd_imag /// Leading dimension of imaginary part of D matrix + ); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Unique pointer storing the handle +using HandlePtr = std::unique_ptr; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Finds conv2d operation instances with Conv2d::ElementC = Reduction::ElementWorkspace +Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation); +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Finds gemm operation instances with ElementC = Reduction::ElementWorkspace +Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation); +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/library.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/library.h new file mode 100644 index 0000000000000000000000000000000000000000..3c945d14ba59f33ba8f00b421a514da408c2e218 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/library.h @@ -0,0 +1,810 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + + \brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS. + + Generally, + + description - compile-time constant parameters used to instantiate an operation + + configuration - runtime parameters with computationally expensive initialization + + arguments - runtime parameters that may be passed to an initialized operation with low + computational overhead +*/ + +#pragma once + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#include +#include +#include +#include +#include + +#include "cutlass/cutlass.h" +#include "cutlass/library/types.h" +#include "cutlass/library/descriptions.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/blas3.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/conv/convolution.h" +#include "cutlass/conv/conv2d_problem_size.h" +#include "cutlass/conv/conv3d_problem_size.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mode of Universal GEMM +using GemmUniversalMode = cutlass::gemm::GemmUniversalMode; + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Base class for all operations +class Operation { +public: + + virtual ~Operation() { } + + virtual OperationDescription const & description() const = 0; + + virtual Status can_implement( + void const *configuration, + void const *arguments) const = 0; + + virtual uint64_t get_host_workspace_size( + void const *configuration) const = 0; + + virtual uint64_t get_device_workspace_size( + void const *configuration, + void const *arguments = nullptr) const = 0; + + virtual Status initialize( + void const *configuration, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const = 0; + + virtual Status run( + void const *arguments, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const = 0; + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for basic GEMM operations +// +// OperationKind: Gemm +// GemmKind: Gemm +// +struct GemmConfiguration { + + /// GEMM problem size + gemm::GemmCoord problem_size; + + /// Leading dimension of A matrix + int64_t lda; + + /// Leading dimension of B matrix + int64_t ldb; + + /// Leading dimension of C matrix + int64_t ldc; + + /// Leading dimension of D matrix + int64_t ldd; + + /// Number of partitions of K dimension + int split_k_slices; +}; + +/// Arguments for GEMM +struct GemmArguments { + + /// Pointer to A matrix + void const *A; + + /// Pointer to B matrix + void const *B; + + /// Pointer to C matrix + void const *C; + + /// Pointer to D matrix + void *D; + + /// Host or device pointer to alpha scalar + void const *alpha; + + /// Host or device pointer to beta scalar + void const *beta; + + /// Enumerant indicating whether alpha/beta point to host or device memory + ScalarPointerMode pointer_mode; +}; + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for batched GEMM in which multiple matrix products are computed +// +// OperationKind: Gemm +// GemmKind: Batched + +struct GemmBatchedConfiguration { + + /// GEMM problem size + gemm::GemmCoord problem_size; + + /// Leading dimension of A matrix + int64_t lda; + + /// Leading dimension of B matrix + int64_t ldb; + + /// Leading dimension of C matrix + int64_t ldc; + + /// Leading dimension of D matrix + int64_t ldd; + + /// Stride between instances of the A matrix in memory + int64_t batch_stride_A; + + /// Stride between instances of the B matrix in memory + int64_t batch_stride_B; + + /// Stride between instances of the C matrix in memory + int64_t batch_stride_C; + + /// Stride between instances of the D matrix in memory + int64_t batch_stride_D; + + /// Number of GEMMs in batch + int batch_count; +}; + +/// Arguments to batched GEMM +using GemmBatchedArguments = GemmArguments; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for batched GEMM in which multiple matrix products are computed +// +// OperationKind: Gemm +// GemmKind: Array + +struct GemmArrayConfiguration { + + gemm::GemmCoord problem_size; + + /// Leading dimension of A matrix + int64_t lda; + + /// Leading dimension of B matrix + int64_t ldb; + + /// Leading dimension of C matrix + int64_t ldc; + + /// Leading dimension of D matrix + int64_t ldd; + + int batch_count; +}; + +/// Arguments for GEMM - used by all the GEMM operations +struct GemmArrayArguments { + void const * const *A; + void const * const *B; + void const * const *C; + void * const *D; + void const *alpha; + void const *beta; + ScalarPointerMode pointer_mode; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Universal GEMM supporting multiple split-K modes, multiple batched modes, 
real and complex +// +// OperationKind: Gemm +// GemmKind: Universal + +struct GemmUniversalConfiguration { + + GemmUniversalMode mode; + gemm::GemmCoord problem_size; + int batch_count; + + int64_t lda; + int64_t ldb; + int64_t ldc; + int64_t ldd; +}; + +struct GemmUniversalArguments { + // NOTE: these are replicated for 3.0 interfaces + gemm::GemmCoord problem_size; + int batch_count; + + void const *A; + void const *B; + void const *C; + void *D; + + void const *alpha; + void const *beta; + ScalarPointerMode pointer_mode; + + // NOTE: these are replicated for 3.0 interfaces + int64_t lda; + int64_t ldb; + int64_t ldc; + int64_t ldd; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + // Needed for some 3.x kernels + int sm_count; + + library::RasterOrder raster_order; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Complex valued GEMM in which real and imaginary parts are separated by a stride +// +// OperationKind: Gemm +// GemmKind: Planar complex + +struct GemmPlanarComplexConfiguration { + + GemmUniversalMode mode; + gemm::GemmCoord problem_size; + int batch_count; + + int64_t lda_real; + int64_t lda_imag; + + int64_t ldb_real; + int64_t ldb_imag; + + int64_t ldc_real; + int64_t ldc_imag; + + int64_t ldd_real; + int64_t ldd_imag; +}; + +/// Arguments for planar complex GEMMs +struct GemmPlanarComplexArguments { + + void const *A_real; + void const *A_imag; + + void const *B_real; + void const *B_imag; + + void const *C_real; + void const *C_imag; + + void *D_real; + void *D_imag; + + void const *alpha; + void const *beta; + ScalarPointerMode pointer_mode; + + int64_t batch_stride_A_real; + int64_t batch_stride_A_imag; + + int64_t batch_stride_B_real; + int64_t batch_stride_B_imag; + + int64_t batch_stride_C_real; + int64_t batch_stride_C_imag; + + int64_t batch_stride_D_real; + int64_t batch_stride_D_imag; +}; + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This is a special form of planar complex which loads pointers and problem size +/// from memory. +struct GemmPlanarComplexArrayConfiguration { + + gemm::GemmCoord problem_size; + int batch_count; + + int64_t lda_real; + int64_t lda_imag; + + int64_t ldb_real; + int64_t ldb_imag; + + int64_t ldc_real; + int64_t ldc_imag; + + int64_t ldd_real; + int64_t ldd_imag; +}; + +/// Arguments for planar complex GEMMs +struct GemmPlanarComplexArrayArguments { + + int const *M; + int const *N; + int const *K; + + void const * const * A_real; + void const * const * A_imag; + void const * const * B_real; + void const * const * B_imag; + void const * const * C_real; + void const * const * C_imag; + void * const * D_real; + void * const * D_imag; + + void const * alpha; + void const * beta; + ScalarPointerMode pointer_mode; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Grouped GEMM supporting +// +// OperationKind: Gemm +// GemmKind: Grouped + +struct GemmGroupedConfiguration { + + int problem_count; + int threadblock_count; + +}; + +struct GemmGroupedArguments { + + gemm::GemmCoord *problem_sizes; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t *lda; + int64_t *ldb; + int64_t *ldc; + int64_t *ldd; + + void const *alpha; + void const *beta; + ScalarPointerMode pointer_mode; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// OperationKind: kSparseGemm +// + +/// Computes GEMM assuming one of the inputs has 2:4 structured sparsity. 
+struct SparseGemmConfiguration { + + GemmUniversalMode mode; + gemm::GemmCoord problem_size; + int batch_count; /// number of sparse matrix products in batch + + int64_t lda; /// leading dimension of A operand + int64_t ldb; /// leading dimension of B operand + int64_t ldc; /// leading dimension of C operand + int64_t ldd; /// leading dimension of D operand + int64_t lde; /// leading dimension of E operand (metadata matrix) + + int64_t batch_stride_A; // stride between matrices + int64_t batch_stride_B; // stride between matrices + int64_t batch_stride_C; // stride between matrices + int64_t batch_stride_D; // stride between matrices + int64_t batch_stride_E; // stride between matrices +}; + +/// Arguments for sparse GEMMs +struct SparseGemmArguments { + + void const *A; /// pointer to A matrix + void const *B; /// pointer to B matrix + void const *C; /// pointer to C matrix + void *D; /// pointer to D matrix + void const *E; /// pointer to E matrix (metadata) + + void const *alpha; /// pointer to alpha scalar + void const *beta; /// pointer to beta scalar + ScalarPointerMode pointer_mode; /// enumerant indicating whether alpha/beta pointers are host + /// or device pointers. 
+}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for basic Rank K update operations +// +// OperationKind: (Syrk, Herk, Syr2k, Her2k) +// RankKKind: Universal +// +struct RankKConfiguration { + + /// SYRK problem size + gemm::GemmCoord problem_size; + + /// Leading dimension of A matrix + int64_t lda; + + /// Leading dimension of B matrix + int64_t ldb; + + /// Leading dimension of C matrix + int64_t ldc; + + /// Leading dimension of D matrix + int64_t ldd; + + /// Batch Count + int batch_count; +}; + +/// Arguments for (Syrk, Herk, Syr2k, Her2k) +struct RankKArguments { + + /// Pointer to A matrix + void const *A; + + /// Pointer to B matrix (used only for Syr2k and Her2k) + void const *B; + + /// Pointer to C matrix + void const *C; + + /// Pointer to D matrix + void *D; + + /// Host or device pointer to alpha scalar + void const *alpha; + + /// Host or device pointer to beta scalar + void const *beta; + + /// Enumerant indicating whether alpha/beta point to host or device memory + ScalarPointerMode pointer_mode; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for basic TRMM operations +// +// OperationKind: Trmm +// TrmmKind: Universal +// +struct TrmmConfiguration { + + /// TRMM problem size + gemm::GemmCoord problem_size; + + /// Leading dimension of A matrix + int64_t lda; + + /// Leading dimension of B matrix + int64_t ldb; + + /// Leading dimension of D matrix + int64_t ldd; + + /// Batch Count + int batch_count; +}; + +/// Arguments for TRMM +struct TrmmArguments { + + /// Pointer to A matrix + void const *A; + + /// Pointer to B matrix + void const *B; + + /// Pointer to D matrix + void *D; + + /// Host or device pointer to alpha scalar + void const *alpha; + + /// Host or device pointer to beta 
scalar + void const *beta; + + /// Enumerant indicating whether alpha/beta point to host or device memory + ScalarPointerMode pointer_mode; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_D; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for basic SYMM/HEMM update operations +// +// OperationKind: (Symm, Hemm) +// SymmKind: Universal +// +struct SymmConfiguration { + + /// SYMM/HEMM problem size + gemm::GemmCoord problem_size; + + /// Leading dimension of A matrix + int64_t lda; + + /// Leading dimension of B matrix + int64_t ldb; + + /// Leading dimension of C matrix + int64_t ldc; + + /// Leading dimension of D matrix + int64_t ldd; + + /// Batch Count + int batch_count; +}; + +/// Arguments for (Symm, Hemm) +struct SymmArguments { + + /// Pointer to A matrix + void const *A; + + /// Pointer to B matrix + void const *B; + + /// Pointer to C matrix + void const *C; + + /// Pointer to D matrix + void *D; + + /// Host or device pointer to alpha scalar + void const *alpha; + + /// Host or device pointer to beta scalar + void const *beta; + + /// Enumerant indicating whether alpha/beta point to host or device memory + ScalarPointerMode pointer_mode; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Two dimensional convolution +// +// OperationKind: Conv2d +// +struct Conv2dConfiguration { + + conv::SplitKMode split_k_mode; + + /// Conv2d problem size + // contains strictly conv2d size (N,H,W,C,K,R,S,P,Q,padding,stride,dilation,mode) + // also includes (split_k_slices, groups) + conv::Conv2dProblemSize problem_size; + + // stride of operand A + std::vector stride_a; + + // stride of operand B + 
std::vector stride_b; + + // stride of operand C + std::vector stride_c; +}; + + +/// Three dimensional convolution +// +// OperationKind: Conv3d +// +struct Conv3dConfiguration { + + conv::SplitKMode split_k_mode; + + /// Conv2d problem size + // contains strictly conv2d size (N,D,H,W,C,K,T,R,S,Z,P,Q,padding,stride,dilation,mode) + // also includes (split_k_slices, groups) + conv::Conv3dProblemSize problem_size; + + /// Layout object for activations tensor + layout::TensorNDHWC layout_activations; + + /// Layout object for filters tensor + layout::TensorNDHWC layout_filters; + + /// Layout object for source tensor + layout::TensorNDHWC layout_source; + + /// Layout object for output tensor + layout::TensorNDHWC layout_output; + + // + // Methods + // + + // Mapping functions (A,B,C -> activation,filter,output) + layout::TensorNDHWC layout_a(library::ConvKind const &conv_kind) const { + switch (conv_kind) { + case library::ConvKind::kFprop: return layout_activations; + case library::ConvKind::kDgrad: return layout_output; + case library::ConvKind::kWgrad: return layout_output; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + layout::TensorNDHWC layout_b(library::ConvKind const &conv_kind) const { + switch (conv_kind) { + case library::ConvKind::kFprop: return layout_filters; + case library::ConvKind::kDgrad: return layout_filters; + case library::ConvKind::kWgrad: return layout_activations; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + layout::TensorNDHWC layout_c(library::ConvKind const &conv_kind) const { + switch (conv_kind) { + case library::ConvKind::kFprop: return layout_output; + case library::ConvKind::kDgrad: return layout_activations; + case library::ConvKind::kWgrad: return layout_filters; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } +}; + +/// Arguments for CONV +struct ConvArguments { + + 
///////////////////////////////////////////////////////// + /// ImplicitGemm matrices A, B, C, D + ///////////////////////////////////////////////////////// + /// pointer to implicit gemm matrix A + void const *A; + + /// pointer to implicit gemm matrix B + void const *B; + + /// pointer to reordered matrix B + void const *reordered_B; + + /// pointer to implicit gemm matrix C + void const *C; + + /// pointer to implicit gemm destination matrix D + void *D; + + /// Host or device pointer to alpha scalar + void const *alpha; + + /// Host or device pointer to beta scalar + void const *beta; + + /// Enumerant indicating whether alpha/beta point to host or device memory + ScalarPointerMode pointer_mode; + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Configuration for Reduction operations +// +// OperationKind: Reduction +// +struct ReductionConfiguration { + + /// Reduction problem size + MatrixCoord problem_size; + + /// Number of partitions to reduce + int partitions; + + /// Number of elements between each partition + int64_t partition_stride; + + /// leading dimension of 'w'orkspace operand + int64_t ldw; + + /// leading dimension of 's'ource operand + int64_t lds; + + /// leading dimension of 'd'estination operand + int64_t ldd; +}; + +/// Arguments for Reduction +struct ReductionArguments { + + /// Pointer to workspace matrix + void const *workspace; + + /// Pointer to source matrix + void const *source; + + /// Pointer to destination matrix + void *destination; + + /// pointer to reference matrix + void *reference; + + /// Host or device pointer to alpha scalar + void const *alpha; + + /// Host or device pointer to beta scalar + void const *beta; + + /// Enumerant indicating whether alpha/beta point to host or device memory + ScalarPointerMode pointer_mode; +}; + +} // namespace library +} // namespace cutlass + 
+///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/manifest.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/manifest.h new file mode 100644 index 0000000000000000000000000000000000000000..abce958bef51805c0bac468f6cd08d0479118524 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/manifest.h @@ -0,0 +1,110 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Manifest of CUTLASS Library + + This is the root of the data structure containing CUTLASS objects +*/ + +#pragma once + +#include +#include +#include + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#include "library.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Forward declaration +class Manifest; + +// init and insert all cutlass gemm operations in manifest object (procedurally generated using generator.py) +void initialize_all(Manifest &manifest); + +// init and insert all reduction op in manifest object (manually instantiated in library/reduction) +void initialize_all_reduction_op(Manifest &manifest); + +///////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// List of operations +using OperationVector = std::vector>; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Manifest of CUTLASS Library +class Manifest { +private: + + /// Operation provider + Provider provider_; + + /// Global list of operations + OperationVector 
operations_; + +public: + Manifest (Provider provider = library::Provider::kCUTLASS) : provider_(provider) { } + + /// Top-level initialization + Status initialize(); + + /// Used for initialization + void reserve(size_t operation_count); + + /// Graceful shutdown + Status release(); + + /// Appends an operation and takes ownership + void append(Operation *operation_ptr); + + /// Returns an iterator to the first operation + OperationVector const &operations() const; + + /// Returns a const iterator + OperationVector::const_iterator begin() const; + + /// Returns a const iterator + OperationVector::const_iterator end() const; +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/operation_table.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/operation_table.h new file mode 100644 index 0000000000000000000000000000000000000000..06ea28b00a60e2a4a425c7ca48278b643ff11902 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/operation_table.h @@ -0,0 +1,526 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* + \file + \brief Defines a data structure in which a set of functionally equivalent library::Operation + instances may be queried. 
+*/ + +#pragma once +#include +#include +#include +#include + +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/util.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Data Structures for Gemm Functional Maps +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tuple uniquely identifying Gemm functional behavior +struct GemmFunctionalKey { + + Provider provider; + GemmKind gemm_kind; + NumericTypeID element_compute; + NumericTypeID element_scalar; + NumericTypeID element_A; + LayoutTypeID layout_A; + ComplexTransform transform_A; + NumericTypeID element_B; + LayoutTypeID layout_B; + ComplexTransform transform_B; + NumericTypeID element_C; + LayoutTypeID layout_C; + NumericTypeID element_D; + LayoutTypeID layout_D; + + // + // Methods + // + + inline + GemmFunctionalKey( + Provider provider, + GemmKind gemm_kind = GemmKind::kGemm, + NumericTypeID element_compute = NumericTypeID::kF32, + NumericTypeID element_scalar = NumericTypeID::kF32, + NumericTypeID element_A = NumericTypeID::kF16, + LayoutTypeID layout_A = LayoutTypeID::kColumnMajor, + ComplexTransform transform_A = ComplexTransform::kNone, + NumericTypeID element_B = NumericTypeID::kF16, + LayoutTypeID layout_B = LayoutTypeID::kColumnMajor, + ComplexTransform transform_B = ComplexTransform::kNone, + NumericTypeID element_C = NumericTypeID::kF16, + LayoutTypeID layout_C = LayoutTypeID::kColumnMajor, + NumericTypeID element_D = NumericTypeID::kF16, + LayoutTypeID layout_D = LayoutTypeID::kColumnMajor + ): + provider(provider), + gemm_kind(gemm_kind), + element_compute(element_compute), + element_scalar(element_scalar), + element_A(element_A), + layout_A(layout_A), + transform_A(transform_A), + 
element_B(element_B), + layout_B(layout_B), + transform_B(transform_B), + element_C(element_C), + layout_C(layout_C), + element_D(element_D), + layout_D(layout_D) + { } + + inline + bool operator==(GemmFunctionalKey const &rhs) const { + return + (provider == rhs.provider) && + (gemm_kind == rhs.gemm_kind) && + (element_compute == rhs.element_compute) && + (element_scalar == rhs.element_scalar) && + (element_A == rhs.element_A) && + (layout_A == rhs.layout_A) && + (transform_A == rhs.transform_A) && + (element_B == rhs.element_B) && + (layout_B == rhs.layout_B) && + (transform_B == rhs.transform_B) && + (element_C == rhs.element_C) && + (layout_C == rhs.layout_C) && + (element_D == rhs.element_D) && + (layout_D == rhs.layout_D); + } + + inline + bool operator!=(GemmFunctionalKey const &rhs) const { + return !(*this == rhs); + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +inline +std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k) { + + out << "{\n" + << " provider: " << to_string(k.provider) << "\n" + << " gemm_kind: " << to_string(k.gemm_kind) << "\n" + << " element_compute: " << to_string(k.element_compute) << "\n" + << " element_scalar: " << to_string(k.element_scalar) << "\n" + << " element_A: " << to_string(k.element_A) << "\n" + << " layout_A: " << to_string(k.layout_A) << "\n" + << " transform_A: " << to_string(k.transform_A) << "\n" + << " element_B: " << to_string(k.element_B) << "\n" + << " layout_B: " << to_string(k.layout_B) << "\n" + << " transform_B: " << to_string(k.transform_B) << "\n" + << " element_C: " << to_string(k.element_C) << "\n" + << " layout_C: " << to_string(k.layout_C) << "\n" + << " element_D: " << to_string(k.element_D) << "\n" + << " layout_D: " << to_string(k.layout_D) << "\n" + << "}"; + + return out; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Hash function for 
GemmFunctionalKey +struct GemmFunctionalKeyHasher { + using IntHash = std::hash; + + inline + static size_t rotl(size_t key, int shl) { + return (key << shl) | (key >> (sizeof(key)*8 - shl)); + } + + inline + size_t operator()(GemmFunctionalKey const &key) const { + IntHash hash; + + return + rotl(hash(int(key.provider)), 1) ^ + rotl(hash(int(key.gemm_kind)), 2) ^ + rotl(hash(int(key.element_compute)), 3) ^ + rotl(hash(int(key.element_scalar)), 4) ^ + rotl(hash(int(key.element_A)), 5) ^ + rotl(hash(int(key.layout_A)), 6) ^ + rotl(hash(int(key.transform_A)), 7) ^ + rotl(hash(int(key.element_B)), 8) ^ + rotl(hash(int(key.layout_B)), 9) ^ + rotl(hash(int(key.transform_B)), 10) ^ + rotl(hash(int(key.element_C)), 11) ^ + rotl(hash(int(key.layout_C)), 12) ^ + rotl(hash(int(key.element_D)), 13) ^ + rotl(hash(int(key.layout_D)), 14); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Establishes a partial ordering to search for GEMM operators +struct GemmPreferenceKey { + + int compute_capability; + int alignment; + + // + // Methods + // + + GemmPreferenceKey(): compute_capability(), alignment() { } + + GemmPreferenceKey(int cc, int alignment): compute_capability(cc), alignment(alignment) { } + + bool operator<(GemmPreferenceKey const &rhs) const { + return (compute_capability < rhs.compute_capability) || + ((compute_capability == rhs.compute_capability) && (alignment < rhs.alignment)); + } + + bool operator==(GemmPreferenceKey const &rhs) const { + return compute_capability == rhs.compute_capability; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Maps minimum compute capability onto a vector of possible operations +using GemmOperationVectorMap = std::map< + GemmPreferenceKey, + std::vector +>; + +/// Maps a GemmFunctionalKey onto a vector of Operation * objects expected to be of kind kGemm +using GemmOperationFunctionalMap = 
std::unordered_map< + GemmFunctionalKey, + GemmOperationVectorMap, + GemmFunctionalKeyHasher +>; +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Data Structures for Conv Functional Maps +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tuple uniquely identifying conv2d functional behavior +struct ConvFunctionalKey { + library::Provider provider; + library::ConvKind conv_kind; + library::NumericTypeID element_A; + library::LayoutTypeID layout_A; + library::NumericTypeID element_B; + library::LayoutTypeID layout_B; + library::NumericTypeID element_C; + library::LayoutTypeID layout_C; + library::NumericTypeID element_accumulator; + library::NumericTypeID element_compute; + + + // + // Methods + // + + inline + ConvFunctionalKey( + library::Provider provider = library::Provider::kInvalid, + library::ConvKind conv_kind = library::ConvKind::kFprop, + library::NumericTypeID element_A = library::NumericTypeID::kF16, + library::LayoutTypeID layout_A = library::LayoutTypeID::kTensorNHWC, + library::NumericTypeID element_B = library::NumericTypeID::kF16, + library::LayoutTypeID layout_B = library::LayoutTypeID::kTensorNHWC, + library::NumericTypeID element_C = library::NumericTypeID::kF16, + library::LayoutTypeID layout_C = library::LayoutTypeID::kTensorNHWC, + library::NumericTypeID element_accumulator = library::NumericTypeID::kF32, + library::NumericTypeID element_compute = library::NumericTypeID::kF32 + ): + provider(provider), + conv_kind(conv_kind), + element_A(element_A), + layout_A(layout_A), + element_B(element_B), + layout_B(layout_B), + element_C(element_C), + layout_C(layout_C), + element_accumulator(element_accumulator), + element_compute(element_compute) + { } + + inline + bool operator==(ConvFunctionalKey const &rhs) const { + return + (provider == 
rhs.provider) && + (conv_kind == rhs.conv_kind) && + (element_A == rhs.element_A) && + (layout_A == rhs.layout_A) && + (element_B == rhs.element_B) && + (layout_B == rhs.layout_B) && + (element_C == rhs.element_C) && + (layout_C == rhs.layout_C) && + (element_accumulator == rhs.element_accumulator) && + (element_compute == rhs.element_compute); + } + + inline + bool operator!=(ConvFunctionalKey const &rhs) const { + return !(*this == rhs); + } +}; +///////////////////////////////////////////////////////////////////////////////////////////////// +inline +std::ostream& operator<< (std::ostream& out, const cutlass::library::ConvFunctionalKey& key) { + out << "{\n" + << "provider: " << to_string(key.provider) << std::endl + << "conv_kind: " << to_string(key.conv_kind) << std::endl + << "element_A: " << to_string(key.element_A) << std::endl + << "layout_A: " << to_string(key.layout_A) << std::endl + << "element_B: " << to_string(key.element_B) << std::endl + << "layout_B: " << to_string(key.layout_B) << std::endl + << "element_C: " << to_string(key.element_C) << std::endl + << "layout_C: " << to_string(key.layout_C) << std::endl + << "element_accumulator: " << to_string(key.element_accumulator) << std::endl + << "element_compute: " << to_string(key.element_compute) << std::endl + << "}"; + + return out; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +struct ConvFunctionalKeyHasher { + using IntHash = std::hash; + + inline + static size_t rotl(size_t key, int shl) { + return (key << shl) | (key >> (sizeof(key)*8 - shl)); + } + + inline + size_t operator()(ConvFunctionalKey const &key) const { + IntHash hash; + + return + rotl(hash(int(key.provider)), 1) ^ + rotl(hash(int(key.conv_kind)), 2) ^ + rotl(hash(int(key.element_A)), 3) ^ + rotl(hash(int(key.layout_A)), 4) ^ + rotl(hash(int(key.element_B)), 5) ^ + rotl(hash(int(key.layout_B)), 6) ^ + rotl(hash(int(key.element_C)), 7) ^ + rotl(hash(int(key.layout_C)), 8) ^ + 
rotl(hash(int(key.element_accumulator)), 9) ^ + rotl(hash(int(key.element_compute)), 10); + } +}; +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Establishes a partial ordering to search for Conv2d operators +struct ConvPreferenceKey { + + int compute_capability; + IteratorAlgorithmID iterator_algorithm; + + + // + // Methods + // + + ConvPreferenceKey(): compute_capability(), iterator_algorithm() { } + + ConvPreferenceKey(int cc, IteratorAlgorithmID iterator_algorithm): + compute_capability(cc), iterator_algorithm(iterator_algorithm) { } + + bool operator<(ConvPreferenceKey const &rhs) const { + return (compute_capability < rhs.compute_capability) || + ((compute_capability == rhs.compute_capability) && (iterator_algorithm < rhs.iterator_algorithm)); + } + + bool operator==(ConvPreferenceKey const &rhs) const { + return (compute_capability == rhs.compute_capability) && + (iterator_algorithm == rhs.iterator_algorithm); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Maps minimum compute capability onto a vector of possible operations +using ConvOperationVectorMap = std::map< + ConvPreferenceKey, + std::vector +>; + +/// Maps a GemmFunctionalKey onto a vector of Operation * objects expected to be of kind kGemm +using ConvOperationFunctionalMap = std::unordered_map< + ConvFunctionalKey, + ConvOperationVectorMap, + ConvFunctionalKeyHasher +>; +///////////////////////////////////////////////////////////////////////////////////////////////// + + +/// Tuple uniquely identifying conv2d functional behavior +struct ReductionFunctionalKey { + library::Provider provider; + library::NumericTypeID element_workspace; + library::NumericTypeID element_accumulator; + library::NumericTypeID element_output; + library::NumericTypeID element_compute; + library::MathOperationID reduce_math_op; + library::EpilogueKind epilogue_math_op; + + + // + // Methods + // + 
+ inline + ReductionFunctionalKey( + library::Provider provider = library::Provider::kInvalid, + library::NumericTypeID element_workspace = library::NumericTypeID::kF16, + library::NumericTypeID element_accumulator = library::NumericTypeID::kF32, + library::NumericTypeID element_output = library::NumericTypeID::kF16, + library::NumericTypeID element_compute = library::NumericTypeID::kF32, + library::MathOperationID reduce_math_op = library::MathOperationID::kAdd, + library::EpilogueKind epilogue_math_op = library::EpilogueKind::kLinearCombination + ): + provider(provider), + element_workspace(element_workspace), + element_accumulator(element_accumulator), + element_output(element_output), + element_compute(element_compute), + reduce_math_op(reduce_math_op), + epilogue_math_op(epilogue_math_op) + { } + + inline + bool operator==(ReductionFunctionalKey const &rhs) const { + return + (provider == rhs.provider) && + (element_workspace == rhs.element_workspace) && + (element_accumulator == rhs.element_accumulator) && + (element_output == rhs.element_output) && + (element_compute == rhs.element_compute) && + (reduce_math_op == rhs.reduce_math_op) && + (epilogue_math_op == rhs.epilogue_math_op); + } + + inline + bool operator!=(ReductionFunctionalKey const &rhs) const { + return !(*this == rhs); + } +}; + + +struct ReductionFunctionalKeyHasher { + using IntHash = std::hash; + + inline + static size_t rotl(size_t key, int shl) { + return (key << shl) | (key >> (sizeof(key)*8 - shl)); + } + + inline + size_t operator()(ReductionFunctionalKey const &key) const { + IntHash hash; + + return + rotl(hash(int(key.provider)), 1) ^ + rotl(hash(int(key.element_workspace)), 2) ^ + rotl(hash(int(key.element_accumulator)), 3) ^ + rotl(hash(int(key.element_output)), 4) ^ + rotl(hash(int(key.element_compute)), 5) ^ + rotl(hash(int(key.reduce_math_op)), 6) ^ + rotl(hash(int(key.epilogue_math_op)), 7); + } +}; 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +inline +std::ostream& operator<< (std::ostream& out, const ReductionFunctionalKey& key) { + out << "{\n" + << "provider: " << library::to_string(key.provider) << std::endl + << "element_workspace : " << library::to_string(key.element_workspace) << std::endl + << "element_accumulator : " << library::to_string(key.element_accumulator) << std::endl + << "element_output : " << library::to_string(key.element_output) << std::endl + << "element_compute : " << library::to_string(key.element_compute) << std::endl + << "}"; + return out; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// ReductionOperationFunctionalMap has NO preference key and a single instance per functional key +// i.e. only one tile size configuration per functional key +using ReductionOperationFunctionalMap = std::unordered_map< + ReductionFunctionalKey, + library::Operation const *, + ReductionFunctionalKeyHasher +>; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Table of cutlass::library::Operation instances +class OperationTable { +public: + + /// Map of all operations of type kGemm + // provider (kCUTLASS) + GemmOperationFunctionalMap gemm_operations; + + /// Map of all operations of type kConv2d + // provider (kCUTLASS, kReferenceHost, kReferenceDevice) + ConvOperationFunctionalMap conv2d_operations; + + /// Map of all operations of type kConv3d + // provider (kCUTLASS, kReferenceHost, kReferenceDevice) + ConvOperationFunctionalMap conv3d_operations; + + /// Map of all operations of type kConv2d + // provider (kCUTLASS) + ReductionOperationFunctionalMap reduction_operations; + +public: + + void append(Manifest const &manifest); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass 
+ +///////////////////////////////////////////////////////////////////////////////////////////////// + +std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k); diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/singleton.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/singleton.h new file mode 100644 index 0000000000000000000000000000000000000000..e0bd9595411a6c22675be9604d04c5d8c526ed82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/singleton.h @@ -0,0 +1,68 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/operation_table.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Singleton instance stores a Manifest and Operation table +class Singleton { +public: + + /// Manifest object + Manifest manifest; + + /// Operation table referencing the Manifest + OperationTable operation_table; + +public: + + Singleton(); + + static Singleton const &get(); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/types.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/types.h new file mode 100644 index 0000000000000000000000000000000000000000..c28efef539a82fbe22a73216280fc3c7af5f5b55 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/types.h @@ -0,0 +1,265 @@ +/*************************************************************************************************** + * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + + #pragma once + + ///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Layout type identifier +enum class LayoutTypeID { + kUnknown, + kColumnMajor, + kRowMajor, + kColumnMajorInterleavedK2, + kRowMajorInterleavedK2, + kColumnMajorInterleavedK4, + kRowMajorInterleavedK4, + kColumnMajorInterleavedK16, + kRowMajorInterleavedK16, + kColumnMajorInterleavedK32, + kRowMajorInterleavedK32, + kColumnMajorInterleavedK64, + kRowMajorInterleavedK64, + kTensorNCHW, + kTensorNCDHW, + kTensorNHWC, + kTensorNDHWC, + kTensorNC32HW32, + kTensorC32RSK32, + kTensorNC64HW64, + kTensorC64RSK64, + kInvalid +}; + +/// Numeric data type +enum class NumericTypeID { + kUnknown, + kVoid, + kB1, + kU2, + kU4, + kU8, + kU16, + kU32, + kU64, + kS2, + kS4, + kS8, + kS16, + kS32, + kS64, + kFE4M3, + kFE5M2, + kF16, + kBF16, + kTF32, + kF32, + kF64, + kCF16, + kCBF16, + kCF32, + kCTF32, + kCF64, + kCS2, + kCS4, + kCS8, + kCS16, + kCS32, + kCS64, + kCU2, + kCU4, + kCU8, + kCU16, + kCU32, + kCU64, + kInvalid +}; + +/// Enumerated type describing a transformation on a complex value. 
+enum class ComplexTransform { + kNone, + kConjugate, + kInvalid +}; + +/// Providers +enum class Provider { + kNone, + kCUTLASS, + kReferenceHost, + kReferenceDevice, + kCUBLAS, + kCUDNN, + kInvalid +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumeration indicating the kind of operation +enum class OperationKind { + kGemm, + kRankK, + kRank2K, + kTrmm, + kSymm, + kConv2d, + kConv3d, + kEqGemm, + kSparseGemm, + kReduction, + kInvalid +}; + +/// Enumeration indicating whether scalars are in host or device memory +enum class ScalarPointerMode { + kHost, + kDevice, + kInvalid +}; + +/// Describes how reductions are performed across threadblocks +enum class SplitKMode { + kNone, + kSerial, + kParallel, + kParallelSerial, + kInvalid +}; + +/// Indicates the classificaition of the math instruction +enum class OpcodeClassID { + kSimt, + kTensorOp, + kWmmaTensorOp, + kSparseTensorOp, + kInvalid +}; + +enum class MathOperationID { + kAdd, + kMultiplyAdd, + kMultiplyAddSaturate, + kMultiplyAddFastBF16, + kMultiplyAddFastF16, + kMultiplyAddFastF32, + kMultiplyAddComplex, + kMultiplyAddComplexFastF32, + kMultiplyAddGaussianComplex, + kXorPopc, + kInvalid +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumeration indicating what kind of GEMM operation to perform +enum class GemmKind { + kGemm, + kSparse, + kUniversal, + kPlanarComplex, + kPlanarComplexArray, + kGrouped, + kInvalid +}; + +/// Enumeration indicating what kind of RankK update operation to perform +enum class RankKKind { + kUniversal, + kInvalid +}; + +/// Enumeration indicating what kind of TRMM operation to perform +enum class TrmmKind { + kUniversal, + kInvalid +}; + +/// Enumeration indicating what kind of SYMM/HEMM operation to perform +enum class SymmKind { + kUniversal, + kInvalid +}; + +/// Enumeration indicating what kind of Conv2d operation to perform +enum class ConvKind { 
+ kUnknown, + kFprop, + kDgrad, + kWgrad, + kInvalid +}; + +enum class ConvModeID { + kCrossCorrelation, + kConvolution, + kInvalid +}; + +// Iterator algorithm enum in order of general performance-efficiency +enum class IteratorAlgorithmID { + kNone, + kAnalytic, + kOptimized, + kFixedChannels, + kFewChannels, + kInvalid +}; + + +enum class EpilogueKind { + kUnknown, + kConversion, + kLinearCombination, + kLinearCombinationClamp, + kLinearCombinationPlanarComplex, + kLinearCombinationRelu, + kLinearCombinationSigmoid, + kInvalid +}; + +enum class RasterOrder { + kAlongN, + kAlongM, + kHeuristic, + kInvalid +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/util.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/util.h new file mode 100644 index 0000000000000000000000000000000000000000..d385bfd759f5c8f30a1301ba9fbaff94da68457e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/include/cutlass/library/util.h @@ -0,0 +1,204 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + + \brief Utilities accompanying the CUTLASS library for interacting with Library types. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexical cast from string +template T from_string(std::string const &); + +/// Converts a Provider enumerant to a string +char const *to_string(Provider provider, bool pretty = false); + +/// Parses a Provider enumerant from a string +template <> Provider from_string(std::string const &str); + +/// Converts a GemmKind enumerant to a string +char const *to_string(GemmKind type, bool pretty = false); + +/// Converts a RankKKind enumerant to a string +char const *to_string(RankKKind type, bool pretty = false); + +/// Converts a TrmmKind enumerant to a string +char const *to_string(TrmmKind type, bool pretty = false); + +/// Converts a SymmKind enumerant to a string +char const *to_string(SymmKind type, bool pretty = false); + +/// Converts a SideMode enumerant to a string +char const *to_string(SideMode type, bool pretty = false); + +/// Converts a FillMode enumerant to a string +char const *to_string(FillMode type, bool pretty = false); + +/// Converts a BlasMode enumerant to a string +char const *to_string(BlasMode type, bool pretty = false); + +/// Converts a DiagType enumerant to a string +char const *to_string(DiagType type, bool pretty = false); + +/// Converts a NumericType enumerant to a string +char const *to_string(OperationKind type, bool pretty = false); + +/// Parses a NumericType enumerant from a string +template <> OperationKind from_string(std::string const &str); + +/// Converts a NumericType enumerant to a string +char const *to_string(NumericTypeID type, bool pretty = false); + +/// Parses a NumericType enumerant from a string +template <> NumericTypeID from_string(std::string const &str); + +/// Returns the size 
of a data type in bits +int sizeof_bits(NumericTypeID type); + +/// Returns true if the numeric type is a complex data type or false if real-valued. +bool is_complex_type(NumericTypeID type); + +/// Returns the real-valued type underlying a type (only different from 'type' if complex) +NumericTypeID get_real_type(NumericTypeID type); + +/// Returns true if numeric type is integer +bool is_integer_type(NumericTypeID type); + +/// Returns true if numeric type is signed +bool is_signed_type(NumericTypeID type); + +/// Returns true if numeric type is a signed integer +bool is_signed_integer(NumericTypeID type); + +/// returns true if numeric type is an unsigned integer +bool is_unsigned_integer(NumericTypeID type); + +/// Returns true if numeric type is floating-point type +bool is_float_type(NumericTypeID type); + +/// To string method for cutlass::Status +char const *to_string(Status status, bool pretty = false); + +/// Converts a LayoutTypeID enumerant to a string +char const *to_string(LayoutTypeID layout, bool pretty = false); + +/// Parses a LayoutType enumerant from a string +template <> LayoutTypeID from_string(std::string const &str); + +/// Returns the rank of a layout's stride base on the LayoutTypeID +int get_layout_stride_rank(LayoutTypeID layout_id); + +/// Converts a OpcodeClassID enumerant to a string +char const *to_string(OpcodeClassID type, bool pretty = false); + +/// Converts a OpcodeClassID enumerant from a string +template <> +OpcodeClassID from_string(std::string const &str); + +/// Converts a ComplexTransform enumerant to a string +char const *to_string(ComplexTransform type, bool pretty = false); + +/// Converts a ComplexTransform enumerant from a string +template <> +ComplexTransform from_string(std::string const &str); + + +/// Converts a SplitKMode enumerant to a string +char const *to_string(SplitKMode split_k_mode, bool pretty = false); + +/// Converts a SplitKMode enumerant from a string +template <> +SplitKMode from_string(std::string 
const &str); + +/// Converts a ConvModeID enumerant to a string +char const *to_string(ConvModeID type, bool pretty = false); + +/// Converts a ConvModeID enumerant from a string +template <> +ConvModeID from_string(std::string const &str); + +/// Converts a IteratorAlgorithmID enumerant to a string +char const *to_string(IteratorAlgorithmID type, bool pretty = false); + +/// Converts a IteratorAlgorithmID enumerant from a string +template <> +IteratorAlgorithmID from_string(std::string const &str); + +/// Converts a ConvKind enumerant to a string +char const *to_string(ConvKind type, bool pretty = false); + +/// Converts a ConvKind enumerant from a string +template <> +ConvKind from_string(std::string const &str); + +/// Converts a RasterOrder enumerant to a string +char const *to_string(RasterOrder type, bool pretty = false); + +/// Convers a RasterOrder enumerant from a string +template<> +RasterOrder from_string(std::string const &str); + +/// Lexical cast from int64_t to string +std::string lexical_cast(int64_t int_value); + +/// Lexical cast a string to a byte array. Returns true if cast is successful or false if invalid. +bool lexical_cast(std::vector &bytes, NumericTypeID type, std::string const &str); + +/// Lexical cast TO a string FROM a byte array. Returns true if cast is successful or false if invalid. +std::string lexical_cast(std::vector &bytes, NumericTypeID type); + +/// Casts from a signed int64 to the destination type. Returns true if successful. +bool cast_from_int64(std::vector &bytes, NumericTypeID type, int64_t src); + +/// Casts from an unsigned int64 to the destination type. Returns true if successful. +bool cast_from_uint64(std::vector &bytes, NumericTypeID type, uint64_t src); + +/// Casts from a real value represented as a double to the destination type. Returns true if successful. 
+bool cast_from_double(std::vector &bytes, NumericTypeID type, double src); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/conv2d_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/conv2d_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..d1b1e841682252f008a5ad9aa8836e2d81242cec --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/conv2d_operation.h @@ -0,0 +1,642 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all CONV operation kinds in CUTLASS Library. +*/ + +#pragma once +#include +#include "cutlass/cutlass.h" +#include "cutlass/conv/kernel/default_conv2d_fprop.h" +#include "cutlass/conv/kernel/default_conv2d_group_fprop.h" +#include "cutlass/conv/kernel/default_depthwise_fprop.h" +#include "cutlass/conv/kernel/default_conv2d_dgrad.h" +#include "cutlass/conv/kernel/default_conv2d_wgrad.h" +#include "cutlass/conv/device/implicit_gemm_convolution.h" +#include "cutlass/conv/device/direct_convolution.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" +#include "cutlass/util/host_tensor.h" + +#include "cutlass/util/reference/host/convolution.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/core_io.h" +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class Conv2dOperationBase : public Operation { +public: + + using Operator = Operator_; + + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = 
typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = Operator::kIteratorAlgorithm; + static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; + + using OperatorArguments = typename Operator::Arguments; + +protected: + + /// + ConvDescription description_; + +public: + + /// Constructor + Conv2dOperationBase(char const *name = "unknown_conv2d") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.kind = OperationKind::kConv2d; + description_.conv_dim = Operator::kConvDim; + + description_.iterator_algorithm = IteratorAlgorithmMap::kId; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = make_Coord( + Operator::UnderlyingKernel::WarpCount::kM, + Operator::UnderlyingKernel::WarpCount::kN, + Operator::UnderlyingKernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(); + description_.B = 
make_TensorDescription(); + description_.C = make_TensorDescription(); + description_.element_epilogue = NumericTypeMap::kId; + + // TODO: Add split k mode Serial and parallel to convolutions + // description_.split_k_mode = Operator::kSplitK ? SplitKMode::kSerial : SplitKMode::kNone; + + } + + /// Returns the description of the GEMM operation + virtual OperationDescription const & description() const { + return description_; + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Conv2d library operation class for cutlass profiler +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template +class Conv2dOperation : public Conv2dOperationBase { +public: + + using Operator = Operator_; + + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; + + using OperatorArguments = typename Operator::Arguments; + +public: + /// Constructor + Conv2dOperation(char const *name = "unknown_conv2d_fprop") : Conv2dOperationBase(name) { + this->description_.conv_kind = ConvKindMap::kId; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + Conv2dConfiguration const *configuration) { + + + operator_args.problem_size = configuration->problem_size; + + operator_args.ref_A = + { + nullptr, + LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size)) + 
}; + + operator_args.ref_B = + { + nullptr, + LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_C = + { + nullptr, + LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_D = + { + nullptr, + LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.split_k_mode = configuration->split_k_mode; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + ConvArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.output_op = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.output_op = params; + } + else { + return Status::kErrorInvalidProblem; + } + + operator_args.ref_A.reset(static_cast(const_cast(arguments->A))); + operator_args.ref_B.reset(static_cast(const_cast(arguments->B))); + operator_args.ref_C.reset(static_cast(const_cast(arguments->C))); + operator_args.ref_D.reset(static_cast(const_cast(arguments->D))); + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + Conv2dConfiguration const *configuration = + static_cast(configuration_ptr); + + ConvArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return 
status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + return Operator::get_workspace_size(args); + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + //std::cout << "initialize library::Conv2dOperation" << std::endl; + //print_operator_args(args); + return op->initialize(args, device_workspace, stream); + + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args, device_workspace); + + if (status != Status::kSuccess) { + return status; + } + //std::cout << "run library::Conv2dOperation" << std::endl; + //print_operator_args(args); + return op->run(stream); + } + + /// Call print_operator_args from the Conv2dOperation::initialize() + // to dump arguments passed on 
to cutlass operator for debugging + void print_operator_args(OperatorArguments &operator_args) const { + std::cout << "Conv2dOperation::OperatorArguments" << std::endl + << " problem_size:" << std::endl + << operator_args.problem_size << std::endl + << " split_k_mode: " + << (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial" : "parallel") << std::endl + << " epilogue (alpha, beta): " + << operator_args.output_op.alpha << ", " + << operator_args.output_op.beta << std::endl + << " ref_A (ptr, {stride}): " + << operator_args.ref_A.data() << ", {" + << operator_args.ref_A.stride(0) << ", " + << operator_args.ref_A.stride(1) << ", " + << operator_args.ref_A.stride(2) << "}" << std::endl + << " ref_B (ptr, {stride}): " + << operator_args.ref_B.data() << ", {" + << operator_args.ref_B.stride(0) << ", " + << operator_args.ref_B.stride(1) << ", " + << operator_args.ref_B.stride(2) << "}" << std::endl + << " ref_C (ptr, {stride}): " + << operator_args.ref_C.data() << ", {" + << operator_args.ref_C.stride(0) << ", " + << operator_args.ref_C.stride(1) << ", " + << operator_args.ref_C.stride(2) << "}" << std::endl + << " ref_D (ptr, {stride}): " + << operator_args.ref_D.data() << ", {" + << operator_args.ref_D.stride(0) << ", " + << operator_args.ref_D.stride(1) << ", " + << operator_args.ref_D.stride(2) << "}" << std::endl; + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// DirectConv2d library operation class for cutlass profiler +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class DirectConv2dOperation : public Conv2dOperation { +public: + + using Operator = Operator_; + using Base = Conv2dOperation; + + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = 
typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; + + using OperatorArguments = typename Operator::Arguments; + +public: + /// Constructor + DirectConv2dOperation(char const *name = "unknown_direct)conv2d_fprop") : Conv2dOperation(name) { + this->description_.conv_kind = ConvKindMap::kId; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + Conv2dConfiguration const *configuration) { + + + operator_args.problem_size = configuration->problem_size; + + operator_args.ref_A = + { + nullptr, + LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_B = + { + nullptr, + LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_reordered_B = + { + nullptr, + LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_C = + { + nullptr, + LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_D = + { + nullptr, + LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.split_k_mode = configuration->split_k_mode; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + ConvArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + 
*static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.output_op = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.output_op = params; + } + else { + return Status::kErrorInvalidProblem; + } + + operator_args.ref_A.reset(static_cast(const_cast(arguments->A))); + operator_args.ref_B.reset(static_cast(const_cast(arguments->B))); + operator_args.ref_C.reset(static_cast(const_cast(arguments->C))); + operator_args.ref_D.reset(static_cast(const_cast(arguments->D))); + operator_args.ref_reordered_B.reset(static_cast(const_cast(arguments->reordered_B))); + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + Conv2dConfiguration const *configuration = + static_cast(configuration_ptr); + + ConvArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + return Operator::get_workspace_size(args); + } + + /// Initializes the workspace + virtual Status 
initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + //std::cout << "initialize library::Conv2dOperation" << std::endl; + //print_operator_args(args); + return op->initialize(args, device_workspace, stream); + + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args, device_workspace); + + if (status != Status::kSuccess) { + return status; + } + //std::cout << "run library::Conv2dOperation" << std::endl; + //print_operator_args(args); + return op->run(stream); + } + + /// Call print_operator_args from the Conv2dOperation::initialize() + // to dump arguments passed on to cutlass operator for debugging + void print_operator_args(OperatorArguments &operator_args) const { + std::cout << "Conv2dOperation::OperatorArguments" << std::endl + << " problem_size:" << std::endl + << operator_args.problem_size << std::endl + << " split_k_mode: " + << (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? 
"serial" : "parallel") << std::endl + << " epilogue (alpha, beta): " + << operator_args.output_op.alpha << ", " + << operator_args.output_op.beta << std::endl + << " ref_A (ptr, {stride}): " + << operator_args.ref_A.data() << ", {" + << operator_args.ref_A.stride(0) << ", " + << operator_args.ref_A.stride(1) << ", " + << operator_args.ref_A.stride(2) << "}" << std::endl + << " ref_B (ptr, {stride}): " + << operator_args.ref_B.data() << ", {" + << operator_args.ref_B.stride(0) << ", " + << operator_args.ref_B.stride(1) << ", " + << operator_args.ref_B.stride(2) << "}" << std::endl + << " ref_C (ptr, {stride}): " + << operator_args.ref_C.data() << ", {" + << operator_args.ref_C.stride(0) << ", " + << operator_args.ref_C.stride(1) << ", " + << operator_args.ref_C.stride(2) << "}" << std::endl + << " ref_D (ptr, {stride}): " + << operator_args.ref_D.data() << ", {" + << operator_args.ref_D.stride(0) << ", " + << operator_args.ref_D.stride(1) << ", " + << operator_args.ref_D.stride(2) << "}" << std::endl; + } +}; + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/conv3d_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/conv3d_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..a7a0bacebbdfc90a99c78a18a28dd47cfe3fc63f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/conv3d_operation.h @@ -0,0 +1,385 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all CONV operation kinds in CUTLASS Library. 
+*/ + +#pragma once +#include +#include "cutlass/cutlass.h" +#include "cutlass/conv/kernel/default_conv3d_fprop.h" +#include "cutlass/conv/kernel/default_conv3d_dgrad.h" +#include "cutlass/conv/kernel/default_conv3d_wgrad.h" +#include "cutlass/conv/device/implicit_gemm_convolution.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" +#include "cutlass/util/host_tensor.h" + +#include "cutlass/util/reference/host/convolution.h" +#include "cutlass/util/reference/host/tensor_compare.h" +#include "cutlass/core_io.h" +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class Conv3dOperationBase : public Operation { +public: + + using Operator = Operator_; + + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = Operator::kIteratorAlgorithm; + static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; + + using OperatorArguments = typename Operator::Arguments; + +protected: + + /// + ConvDescription description_; + +public: + + /// Constructor + Conv3dOperationBase(char const *name = "unknown_conv3d") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.kind = OperationKind::kConv3d; + description_.conv_dim = Operator::kConvDim; + + description_.iterator_algorithm = IteratorAlgorithmMap::kId; + + 
description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = make_Coord( + Operator::UnderlyingKernel::WarpCount::kM, + Operator::UnderlyingKernel::WarpCount::kN, + Operator::UnderlyingKernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(); + description_.B = make_TensorDescription(); + description_.C = make_TensorDescription(); + description_.element_epilogue = NumericTypeMap::kId; + + } + + /// Returns the description of the GEMM operation + virtual OperationDescription const & description() const { + return description_; + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Conv2d library operation class for cutlass profiler +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template +class Conv3dOperation : public Conv3dOperationBase { +public: + + using Operator = Operator_; + + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = 
typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; + + using OperatorArguments = typename Operator::Arguments; + +public: + /// Constructor + Conv3dOperation(char const *name = "unknown_conv3d_fprop") : Conv3dOperationBase(name) { + this->description_.conv_kind = ConvKindMap::kId; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + Conv3dConfiguration const *configuration) { + + + operator_args.problem_size = configuration->problem_size; + + operator_args.ref_A = + { + nullptr, + LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_B = + { + nullptr, + LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_C = + { + nullptr, + LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.ref_D = + { + nullptr, + LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) + }; + + operator_args.split_k_mode = configuration->split_k_mode; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + ConvArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.output_op = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + 
static_cast(arguments->beta) + ); + operator_args.output_op = params; + } + else { + return Status::kErrorInvalidProblem; + } + + operator_args.ref_A.reset(static_cast(const_cast(arguments->A))); + operator_args.ref_B.reset(static_cast(const_cast(arguments->B))); + operator_args.ref_C.reset(static_cast(const_cast(arguments->C))); + operator_args.ref_D.reset(static_cast(const_cast(arguments->D))); + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + Conv3dConfiguration const *configuration = + static_cast(configuration_ptr); + + ConvArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + return Operator::get_workspace_size(args); + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + 
//std::cout << "initialize library::Conv3dOperation" << std::endl; + //print_operator_args(args); + return op->initialize(args, device_workspace, stream); + + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args, device_workspace); + + if (status != Status::kSuccess) { + return status; + } + //std::cout << "run library::Conv3dOperation" << std::endl; + //print_operator_args(args); + return op->run(stream); + } + + /// Call print_operator_args from the Conv3dOperation::initialize() + // to dump arguments passed on to cutlass operator for debugging + void print_operator_args(OperatorArguments &operator_args) const { + std::cout << "Conv3dOperation::OperatorArguments" << std::endl + << " problem_size: " + << operator_args.problem_size << std::endl + << " split_k_mode: " + << (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? 
"serial" : "parallel") << std::endl + << " epilogue (alpha, beta): " + << operator_args.output_op.alpha << ", " + << operator_args.output_op.beta << std::endl + << " ref_A (ptr, {stride}): " + << operator_args.ref_A.data() << ", {" + << operator_args.ref_A.stride(0) << ", " + << operator_args.ref_A.stride(1) << ", " + << operator_args.ref_A.stride(2) << ", " + << operator_args.ref_A.stride(3) << "}" << std::endl + << " ref_B (ptr, {stride}): " + << operator_args.ref_B.data() << ", {" + << operator_args.ref_B.stride(0) << ", " + << operator_args.ref_B.stride(1) << ", " + << operator_args.ref_B.stride(2) << ", " + << operator_args.ref_B.stride(3) << "}" << std::endl + << " ref_C (ptr, {stride}): " + << operator_args.ref_C.data() << ", {" + << operator_args.ref_C.stride(0) << ", " + << operator_args.ref_C.stride(1) << ", " + << operator_args.ref_C.stride(2) << ", " + << operator_args.ref_C.stride(3) << "}" << std::endl + << " ref_D (ptr, {stride}): " + << operator_args.ref_D.data() << ", {" + << operator_args.ref_D.stride(0) << ", " + << operator_args.ref_D.stride(1) << ", " + << operator_args.ref_D.stride(2) << ", " + << operator_args.ref_D.stride(3) << "}" << std::endl; + } +}; + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/gemm_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/gemm_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..62f07220c84d244357cf2eab93898e0c2d55f597 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/gemm_operation.h @@ -0,0 +1,1371 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all GEMM operation kinds in CUTLASS Library. 
+*/ + +#pragma once +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/gemm.h" +#include "cutlass/gemm/device/gemm_sparse.h" +#include "cutlass/gemm/device/gemm_complex.h" +#include "cutlass/gemm/device/gemm_batched.h" +#include "cutlass/gemm/device/gemm_array.h" +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/default_gemm_universal.h" +#include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmOperationBase : public Operation { +public: + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = ElementC; + using LayoutD = LayoutC; + // assuming all tensors use same type for StrideIndex + using StrideIndex = typename Operator::LayoutA::Index; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +protected: + + /// + GemmDescription description_; + +public: + + /// Constructor + GemmOperationBase(char const *name = "unknown_gemm") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.kind = OperationKind::kGemm; + description_.gemm_kind = GemmKind::kGemm; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + 
Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = make_Coord( + Operator::GemmKernel::WarpCount::kM, + Operator::GemmKernel::WarpCount::kN, + Operator::GemmKernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(Operator::kAlignmentA); + description_.B = make_TensorDescription(Operator::kAlignmentB); + description_.C = make_TensorDescription(Operator::kAlignmentC); + description_.D = make_TensorDescription(Operator::kAlignmentC); + description_.element_epilogue = NumericTypeMap::kId; + + description_.split_k_mode = SplitKMode::kNone; + description_.transform_A = ComplexTransformMap::kId; + description_.transform_B = ComplexTransformMap::kId; + } + + /// Returns the description of the GEMM operation + virtual OperationDescription const & description() const { + return description_; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmOperation : public GemmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = 
typename Operator::LayoutC; + using ElementD = ElementC; + using LayoutD = LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + GemmOperation(char const *name = "unknown_gemm"): GemmOperationBase(name) { + + this->description_.gemm_kind = GemmKind::kGemm; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + GemmConfiguration const *configuration) { + + operator_args.problem_size = configuration->problem_size; + + operator_args.ref_A = {nullptr, configuration->lda}; + operator_args.ref_B = {nullptr, configuration->ldb}; + operator_args.ref_C = {nullptr, configuration->ldc}; + operator_args.ref_D = {nullptr, configuration->ldd}; + + operator_args.split_k_slices = configuration->split_k_slices; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + GemmArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + operator_args.ref_A.reset(static_cast(arguments->A)); + operator_args.ref_B.reset(static_cast(arguments->B)); + operator_args.ref_C.reset(static_cast(arguments->C)); + operator_args.ref_D.reset(static_cast(arguments->D)); + + return Status::kSuccess; 
+ } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + GemmConfiguration const *configuration = + static_cast(configuration_ptr); + + GemmArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + return Operator::get_workspace_size(args); + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + return op->initialize(args, device_workspace, stream); + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = 
static_cast(host_workspace); + + status = op->update(args); + + if (status != Status::kSuccess) { + return status; + } + + return op->run(stream); + } + + void print_operator_args(OperatorArguments &operator_args) const { +#if 0 + std::cout << "GemmOperation::OperatorArguments" << std::endl; + std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl; + std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl; + std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl; + std::cout << " beta: " << operator_args.epilogue.beta << std::endl; + std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl; + std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl; + std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl; + std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl; + std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl; + std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl; + std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl; +#endif + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmSparseOperation : public GemmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = ElementC; + using LayoutD = LayoutC; + using ElementE = typename Operator::ElementE; + using LayoutE = typename Operator::LayoutE; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename 
Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + GemmSparseOperation(char const *name = "unknown_gemm"): GemmOperationBase(name) { + + this->description_.kind = OperationKind::kSparseGemm; + this->description_.gemm_kind = GemmKind::kSparse; + this->description_.E = make_TensorDescription(Operator::kAlignmentE); + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + SparseGemmConfiguration const *configuration) { + + operator_args.problem_size = configuration->problem_size; + operator_args.ref_A = {nullptr, configuration->lda}; + operator_args.ref_B = {nullptr, configuration->ldb}; + operator_args.ref_C = {nullptr, configuration->ldc}; + operator_args.ref_D = {nullptr, configuration->ldd}; + operator_args.ref_E = {nullptr, configuration->lde}; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + SparseGemmArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + operator_args.ref_A.reset(static_cast(arguments->A)); + operator_args.ref_B.reset(static_cast(arguments->B)); + operator_args.ref_C.reset(static_cast(arguments->C)); + operator_args.ref_D.reset(static_cast(arguments->D)); + operator_args.ref_E.reset(static_cast(arguments->E)); + + return 
Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + SparseGemmConfiguration const *configuration = + static_cast(configuration_ptr); + + SparseGemmArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + return Operator::get_workspace_size(args); + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + return op->initialize(args, device_workspace, stream); + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + 
+ Operator *op = static_cast(host_workspace); + + status = op->update(args); + + if (status != Status::kSuccess) { + return status; + } + + return op->run(stream); + } + + void print_operator_args(OperatorArguments &operator_args) const { +#if 0 + std::cout << "GemmOperation::OperatorArguments" << std::endl; + std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl; + std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl; + std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl; + std::cout << " beta: " << operator_args.epilogue.beta << std::endl; + std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl; + std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl; + std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl; + std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl; + std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl; + std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl; + std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl; +#endif + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmUniversalOperation : public GemmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = ElementC; + using LayoutD = LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename 
Operator::Arguments; + +public: + + /// Constructor + GemmUniversalOperation(char const *name = "unknown_gemm"): + GemmOperationBase(name) { + + this->description_.gemm_kind = GemmKind::kUniversal; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + GemmUniversalConfiguration const *configuration) { + + operator_args.mode = configuration->mode; + + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + operator_args.lda = (configuration->lda); + operator_args.ldb = (configuration->ldb); + operator_args.ldc = (configuration->ldc); + operator_args.ldd = (configuration->ldd); + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + GemmUniversalArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A = arguments->A; + operator_args.ptr_B = arguments->B; + operator_args.ptr_C = arguments->C; + operator_args.ptr_D = arguments->D; + + operator_args.batch_stride_A = arguments->batch_stride_A; + operator_args.batch_stride_B = arguments->batch_stride_B; + operator_args.batch_stride_C = arguments->batch_stride_C; + operator_args.batch_stride_D = arguments->batch_stride_D; + + return Status::kSuccess; + } + +public: + + /// Returns success if the 
operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + GemmUniversalConfiguration const *configuration = + static_cast(configuration_ptr); + + GemmUniversalArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = 
update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args); + + if (status != Status::kSuccess) { + return status; + } + + status = op->run(stream); + + return status; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmPlanarComplexOperation : public GemmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = ElementC; + using LayoutD = LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + GemmPlanarComplexOperation(char const *name = "unknown_gemm"): GemmOperationBase(name) { + + this->description_.gemm_kind = GemmKind::kPlanarComplex; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + GemmPlanarComplexConfiguration const *configuration) { + + operator_args.mode = cutlass::gemm::GemmUniversalMode::kBatched; + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + + operator_args.lda_real = configuration->lda_real; + operator_args.lda_imag = configuration->lda_imag; + operator_args.ldb_real = configuration->ldb_real; + operator_args.ldb_imag = configuration->ldb_imag; + operator_args.ldc_real = configuration->ldc_real; + operator_args.ldc_imag = configuration->ldc_imag; + 
operator_args.ldd_real = configuration->ldd_real; + operator_args.ldd_imag = configuration->ldd_imag; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + GemmPlanarComplexArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast const *>(arguments->alpha), + *static_cast const *>(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast const *>(arguments->alpha), + static_cast const *>(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A_real = arguments->A_real; + operator_args.ptr_A_imag = arguments->A_imag; + operator_args.ptr_B_real = arguments->B_real; + operator_args.ptr_B_imag = arguments->B_imag; + operator_args.ptr_C_real = arguments->C_real; + operator_args.ptr_C_imag = arguments->C_imag; + operator_args.ptr_D_real = arguments->D_real; + operator_args.ptr_D_imag = arguments->D_imag; + + operator_args.batch_stride_A = arguments->batch_stride_A_real; + operator_args.batch_stride_A_imag = arguments->batch_stride_A_imag; + operator_args.batch_stride_B = arguments->batch_stride_B_real; + operator_args.batch_stride_B_imag = arguments->batch_stride_B_imag; + operator_args.batch_stride_C = arguments->batch_stride_C_real; + operator_args.batch_stride_C_imag = arguments->batch_stride_C_imag; + operator_args.batch_stride_D = arguments->batch_stride_D_real; + operator_args.batch_stride_D_imag = arguments->batch_stride_D_imag; + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void 
const *arguments_ptr) const { + + GemmPlanarComplexConfiguration const *configuration = + static_cast(configuration_ptr); + + GemmPlanarComplexArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args); + + if (status != 
Status::kSuccess) { + return status; + } + + status = op->run(stream); + + return status; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmPlanarComplexArrayOperation : public GemmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = ElementC; + using LayoutD = LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + GemmPlanarComplexArrayOperation(char const *name = "unknown_gemm"): GemmOperationBase(name) { + + this->description_.gemm_kind = GemmKind::kPlanarComplexArray; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + GemmPlanarComplexArrayConfiguration const *configuration) { + + operator_args.mode = cutlass::gemm::GemmUniversalMode::kArray; + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + operator_args.lda_real = configuration->lda_real; + operator_args.lda_imag = configuration->lda_imag; + operator_args.ldb_real = configuration->ldb_real; + operator_args.ldb_imag = configuration->ldb_imag; + operator_args.ldc_real = configuration->ldc_real; + operator_args.ldc_imag = configuration->ldc_imag; + operator_args.ldd_real = configuration->ldd_real; + operator_args.ldd_imag = configuration->ldd_imag; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration 
and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + GemmPlanarComplexArrayArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast const *>(arguments->alpha), + *static_cast const *>(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast const *>(arguments->alpha), + static_cast const *>(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A_real = arguments->A_real; + operator_args.ptr_A_imag = arguments->A_imag; + operator_args.ptr_B_real = arguments->B_real; + operator_args.ptr_B_imag = arguments->B_imag; + operator_args.ptr_C_real = arguments->C_real; + operator_args.ptr_C_imag = arguments->C_imag; + operator_args.ptr_D_real = arguments->D_real; + operator_args.ptr_D_imag = arguments->D_imag; + + operator_args.ptr_M = arguments->M; + operator_args.ptr_N = arguments->N; + operator_args.ptr_K = arguments->K; + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + GemmPlanarComplexArrayConfiguration const *configuration = + static_cast(configuration_ptr); + + GemmPlanarComplexArrayArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const 
*configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args); + + if (status != Status::kSuccess) { + return status; + } + + status = op->run(stream); + + return status; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmGroupedOperation : public GemmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using 
ElementD = ElementC; + using LayoutD = LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + GemmGroupedOperation(char const *name = "unknown_gemm"): + GemmOperationBase(name) { + + this->description_.gemm_kind = GemmKind::kGrouped; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &op_args, + GemmGroupedConfiguration const *config) { + + op_args.problem_count = config->problem_count; + op_args.threadblock_count = config->threadblock_count; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &op_args, + GemmGroupedArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + + op_args.output_op = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice) { + + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + + op_args.output_op = params; + } + else { + return Status::kErrorInvalidProblem; + } + + op_args.problem_sizes = arguments->problem_sizes; + + op_args.ptr_A = static_cast(arguments->ptr_A); + op_args.ptr_B = static_cast(arguments->ptr_B); + op_args.ptr_C = static_cast(arguments->ptr_C); + op_args.ptr_D = static_cast(arguments->ptr_D); + + op_args.lda = arguments->lda; + op_args.ldb = arguments->ldb; + op_args.ldc = arguments->ldc; + op_args.ldd = arguments->ldd; + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const 
*configuration_ptr, + void const *arguments_ptr) const { + + GemmGroupedConfiguration const *configuration = + static_cast(configuration_ptr); + + GemmGroupedArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != 
Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args); + + if (status != Status::kSuccess) { + return status; + } + + status = op->run(stream); + + return status; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/gemm_operation_3x.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/gemm_operation_3x.hpp new file mode 100644 index 0000000000000000000000000000000000000000..90ddac4839471b3a5dbedc53428fafe6ae592f0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/gemm_operation_3x.hpp @@ -0,0 +1,348 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all GEMM operation kinds in CUTLASS Library. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "library_internal.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmOperation3xBase : public Operation { +public: + using Operator = Operator_; + using OperatorArguments = typename Operator::Arguments; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = typename Operator::ElementD; + using LayoutD = typename Operator::LayoutD; + // assuming all tensors use same type for StrideIndex + using StrideIndex = typename Operator::LayoutA::Index; + 
using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + +private: + + GemmDescription description_; + +public: + + /// Constructor + GemmOperation3xBase(char const *name = "unknown_gemm", GemmKind gemm_kind_ = GemmKind::kGemm) { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.kind = OperationKind::kGemm; + description_.gemm_kind = gemm_kind_; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + if constexpr (Operator::ArchTag::kMinComputeCapability >= 90) { + description_.tile_description.cluster_shape = make_Coord( + Operator::ClusterShape::kM, + Operator::ClusterShape::kN, + Operator::ClusterShape::kK); + } + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = make_Coord( + Operator::WarpCount::kM, + Operator::WarpCount::kN, + Operator::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(Operator::kAlignmentA); + description_.B = make_TensorDescription(Operator::kAlignmentB); + description_.C = make_TensorDescription(Operator::kAlignmentC); + description_.D = make_TensorDescription(Operator::kAlignmentD); + 
description_.element_epilogue = NumericTypeMap::kId; + + description_.split_k_mode = SplitKMode::kNone; + description_.transform_A = ComplexTransformMap::kId; + description_.transform_B = ComplexTransformMap::kId; + } + + /// Returns the description of the GEMM operation + virtual OperationDescription const & description() const { + return description_; + } + + /// Returns the description of the GEMM operation + GemmDescription const& get_gemm_description() const { + return description_; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class GemmUniversal3xOperation : public GemmOperation3xBase { +public: + + using Operator = Operator_; + using OperatorArguments = typename Operator::Arguments; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementD = typename Operator::ElementD; + using LayoutD = typename Operator::LayoutD; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using CollectiveMainloop = typename Operator::CollectiveMainloop; + using CollectiveEpilogue = typename Operator::CollectiveEpilogue; + using ThreadEpilogueOp = typename CollectiveEpilogue::ThreadEpilogueOp; + +public: + + /// Constructor + GemmUniversal3xOperation(char const *name = "unknown_gemm"): + GemmOperation3xBase(name, GemmKind::kUniversal) {} + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, GemmUniversalConfiguration const *configuration) { + // NOTE: GemmUniversalConfiguration does not contain problem shapes or batch strides + // Do nothing here and 
construct kernel arguments in update_arguments_ instead + // We also cannot construct TMA descriptors without all the arguments available + + operator_args.mode = configuration->mode; + return Status::kSuccess; + } + + template + struct UpdateFusionArgs { + static Status update_(FusionArgs const& fusion_args, GemmUniversalArguments const &arguments) { + // If a custom EVT is instantiated then it is the users's responsibility + // to ensure alpha and beta are updated appropriately + return Status::kSuccess; + } + }; + + template + struct UpdateFusionArgs> { + static Status update_(FusionArgs& fusion_args, GemmUniversalArguments const &arguments) { + if (arguments.pointer_mode == ScalarPointerMode::kHost) { + fusion_args.alpha = *static_cast(arguments.alpha); + fusion_args.beta = *static_cast(arguments.beta); + fusion_args.alpha_ptr = nullptr; + fusion_args.beta_ptr = nullptr; + + return Status::kSuccess; + } + else if (arguments.pointer_mode == ScalarPointerMode::kDevice) { + fusion_args.alpha = 0; + fusion_args.beta = 0; + fusion_args.alpha_ptr = static_cast(arguments.alpha); + fusion_args.beta_ptr = static_cast(arguments.beta); + + return Status::kSuccess; + } + else { + return Status::kErrorInvalidProblem; + } + } + }; + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, GemmUniversalArguments const *arguments) { + Status status = Status::kSuccess; + + status = UpdateFusionArgs::update_( + operator_args.epilogue.thread, *arguments); + if (status != Status::kSuccess) { + return status; + } + + // TODO: type erase Arguments structure in 3.0 GEMM + operator_args.problem_shape = cute::make_shape( + arguments->problem_size.m(), + arguments->problem_size.n(), + arguments->problem_size.k(), + arguments->batch_count); + + // update arguments + operator_args.mainloop.ptr_A = static_cast(arguments->A); + operator_args.mainloop.ptr_B = static_cast(arguments->B); + 
operator_args.epilogue.ptr_C = static_cast(arguments->C); + operator_args.epilogue.ptr_D = static_cast(arguments->D); + + operator_args.mainloop.dA = cute::make_int_tuple_from( + arguments->lda, arguments->batch_stride_A); + operator_args.mainloop.dB = cute::make_int_tuple_from( + arguments->ldb, arguments->batch_stride_B); + operator_args.epilogue.dC = cute::make_int_tuple_from( + arguments->ldc, arguments->batch_stride_C); + operator_args.epilogue.dD = operator_args.epilogue.dC; + + /* Query device SM count to pass onto the kernel as an argument, where needed */ + operator_args.hw_info.sm_count = arguments->sm_count; + + if constexpr (!std::is_const_v) { + using Enum_t = decltype(operator_args.scheduler.raster_order); + switch (arguments->raster_order) { + case RasterOrder::kAlongN: + operator_args.scheduler.raster_order = Enum_t::AlongN; + break; + case RasterOrder::kAlongM: + operator_args.scheduler.raster_order = Enum_t::AlongM; + break; + default: + operator_args.scheduler.raster_order = Enum_t::Heuristic; + } + } + + return status; + } + +public: + + /// Returns success if the operation can proceed + Status can_implement( + void const *configuration_ptr, void const *arguments_ptr) const override { + + GemmUniversalConfiguration const *configuration = + static_cast(configuration_ptr); + GemmUniversalArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + auto status = update_arguments_(args, arguments); + if (status != Status::kSuccess) { + return status; + } + + // can_implement rules may need access to problem shape + args.problem_shape = cute::make_shape( + configuration->problem_size.m(), + configuration->problem_size.n(), + configuration->problem_size.k(), + configuration->batch_count); + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + uint64_t get_host_workspace_size(void const *configuration) const override { + return sizeof(Operator); + } + + /// Gets the device-side workspace + 
uint64_t get_device_workspace_size( + void const *configuration_ptr,void const *arguments_ptr) const override { + + OperatorArguments args; + auto status = update_arguments_( + args, static_cast(arguments_ptr)); + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + return size; + } + + /// Initializes the workspace + Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const override { + Operator *op = new (host_workspace) Operator; + return Status::kSuccess; + } + + /// Runs the kernel + Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const override { + + OperatorArguments args; + Status status = update_arguments_(args, static_cast(arguments_ptr)); + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + // We need to call initialize() since we have to rebuild TMA desc for every new set of args + status = op->run(args, device_workspace, stream); + return status; + } +}; +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::library + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/handle.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/handle.cu new file mode 100644 index 0000000000000000000000000000000000000000..bdea2f49f698523f64ad3e4c1d5f0da78c12d19c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/handle.cu @@ -0,0 +1,1187 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief CUTLASS Library handle. 
+*/ +#include +#include +#include + +#include "cutlass/library/handle.h" +#include "cutlass/library/singleton.h" +#include "cutlass/library/util.h" + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Constructor +Handle::Handle( + cudaStream_t stream, + size_t workspace_size +): + provider_(Provider::kCUTLASS), + stream_(stream), + workspace_(nullptr), + workspace_size_(0), + scalar_pointer_mode_(ScalarPointerMode::kHost), + last_operation_(nullptr) { + + int device_idx = -1; + + cudaError_t error = cudaGetDevice(&device_idx); + if (error != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() failed"); + } + + error = cudaGetDeviceProperties(&device_, device_idx); + if (error != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed"); + } + + set_workspace_size(workspace_size); + + Singleton::get(); +} + +/// Destructor +Handle::~Handle() { + if (workspace_) { + + if (workspace_) { + cudaFree(workspace_); + } + + workspace_ = nullptr; + workspace_size_ = 0; + } +} + +/// Move constructor +Handle::Handle(Handle && handle) { + device_ = handle.device_; + workspace_size_ = handle.workspace_size_; + workspace_ = handle.workspace_; + stream_ = handle.stream_; + scalar_pointer_mode_ = handle.scalar_pointer_mode_; + + handle.workspace_ = nullptr; + handle.workspace_size_ = 0; +} + +/// Move assignment operator +Handle & Handle::operator=(Handle && handle) { + + provider_ = handle.provider_; + device_ = handle.device_; + workspace_size_ = handle.workspace_size_; + workspace_ = handle.workspace_; + stream_ = handle.stream_; + scalar_pointer_mode_ = handle.scalar_pointer_mode_; + + handle.workspace_ = nullptr; + handle.workspace_size_ = 0; + + return *this; +} + +int Handle::compute_capability() const { + return device_.major * 10 + device_.minor; +} + +/// Sets the current CUDA stream +void Handle::set_stream(cudaStream_t stream) { + stream_ = 
stream; +} + +/// Gets the current CUDA stream +cudaStream_t Handle::get_stream() const { + return stream_; +} + +/// Gets the current provider +Provider Handle::get_provider() const { + return provider_; +} + +/// Sets the provider of operations +void Handle::set_provider(Provider provider) { + provider_ = provider; +} + +/// Gets the device workspace size +size_t Handle::get_workspace_size() const { + return workspace_size_; +} + +/// Gets a pointer to the device workspace allocation in Global Memory +void *Handle::get_workspace() const { + return workspace_; +} + +/// Sets the size of device workspace, invalidating previous calls to get_device_workspace() +void Handle::set_workspace_size(size_t bytes) { + if (bytes != workspace_size_) { + + if (workspace_) { + cudaFree(workspace_); + } + + workspace_ = nullptr; + workspace_size_ = bytes; + + if (workspace_size_) { + + cudaError_t error = cudaMalloc((void **)&workspace_, workspace_size_); + + if (error != cudaSuccess) { + throw std::runtime_error("Failed to allocate workspace"); + } + } + } + + if (workspace_) { + cudaError_t error = cudaMemset(workspace_, 0, workspace_size_); + + if (error != cudaSuccess) { + throw std::runtime_error("Failed to clear workspace"); + } + } +} + +/// Gets the scalar pointer mode +ScalarPointerMode Handle::get_scalar_pointer_mode() const { + return scalar_pointer_mode_; +} + +/// Sets the scalar pointer mode +void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) { + scalar_pointer_mode_ = mode; +} + +/// Gets the last operation +Operation const *Handle::get_last_operation() const { + return last_operation_; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns the maximum required alignment for each operator +static int maximum_alignment_requirement(GemmDescription const &desc) { + return std::max( + std::max(desc.A.alignment, desc.B.alignment), desc.C.alignment); +} + +/// Returns the largest alignment (in 
units of elements) the problem satisfies, starting from a +/// given upper limit. +static int gemm_problem_alignment( + int M, + int N, + int K, + NumericTypeID element_A, + void const *ptr_A, + int64_t lda, + int64_t batch_stride_A, + NumericTypeID element_B, + void const *ptr_B, + int64_t ldb, + int64_t batch_stride_B, + NumericTypeID element_C, + void const * ptr_C, + int64_t ldc, + int64_t batch_stride_C, + void const * ptr_D, + int64_t ldd, + int64_t batch_stride_D, + int max_alignment_in_bytes = 16 +) { + + void const *pointers[] = { + ptr_A, ptr_B, ptr_C, ptr_D + }; + + int64_t extents[] = { + M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D + }; + + NumericTypeID elements[] = { + element_A, element_B, element_C + }; + + for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) { + + bool satisfied = true; + + // Can pointers satisfy this? + for (void const *ptr : pointers) { + std::uintptr_t int_ptr = reinterpret_cast(ptr); + + if (int_ptr % max_alignment_in_bytes) { + satisfied = false; + break; + } + } + + if (!satisfied) { + continue; + } + + // Compute the maximum alignment based on element data types + int max_element_alignment = 0; + + for (NumericTypeID type_id : elements) { + int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id); + max_element_alignment = std::max(max_element_alignment, element_alignment); + } + + // Can the problem size and leading dimensions satisfy this? + for (int64_t extent : extents) { + if (extent % max_element_alignment) { + satisfied = false; + break; + } + } + + if (!satisfied) { + continue; + } + + // Yes + return max_element_alignment; + } + + // No alignment satisfies this problem + return 0; +} + +/// Find the best kernel in descending order of preference. 
+static Operation const * find_gemm_operation( + GemmOperationFunctionalMap::const_iterator operators_it, + GemmPreferenceKey const preference_key) { + + auto cc_it = operators_it->second.upper_bound(preference_key); + + if (cc_it == operators_it->second.begin()) { + return nullptr; + } + + Operation const *operation = nullptr; + + // Search in descending order of compute capability + do { + --cc_it; + + // Search tile sizes in order, for now. + for (auto const * op : cc_it->second) { + + GemmDescription const &desc = static_cast(op->description()); + + int min_cc = desc.tile_description.minimum_compute_capability; + int max_cc = desc.tile_description.maximum_compute_capability; + + int op_alignment = maximum_alignment_requirement(desc); + + if ((min_cc <= preference_key.compute_capability) && + (preference_key.compute_capability <= max_cc) && + (op_alignment <= preference_key.alignment)) { + + operation = op; + break; + } + } + } while (!operation && cc_it != operators_it->second.begin()); + + return operation; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Executes a GEMM computation: D <= alpha * A*B + beta * C +Status Handle::gemm( + + int M, /// GEMM M dimension + int N, /// GEMM N dimension + int K, /// GEMM K dimension + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices + + void const * ptr_A, /// Pointer to A matrix in Global Memory + int64_t lda, /// Leading dimension of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// 
Complex transformation applied to B matrix - ignored for real-valued matrices + + void const * ptr_B, /// Pointer to B matrix in Global Memory + int64_t ldb, /// Leading dimension of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C and D matrices + + void const * ptr_C, /// Pointer to C matrix + int64_t ldc, /// Leading dimension of C matrix + + void * ptr_D, /// Pointer to D matrix + int64_t ldd /// Leading dimension of D matrix +) { + + // + // Find the operation + // + + GemmFunctionalKey key( + provider_, + GemmKind::kGemm, + element_compute, + element_scalar, + element_A, + layout_A, + transform_A, + element_B, + layout_B, + transform_B, + element_C, // C/D are same type and col major default + LayoutTypeID::kColumnMajor, + element_C, + LayoutTypeID::kColumnMajor + ); + + auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); + + if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { + return cutlass::Status::kErrorNotSupported; + } + + if (operators_it->second.empty()) { + return cutlass::Status::kErrorNotSupported; + } + + // + // Compute the largest alignment restriction the kernel can satisfy. + // + + // Maximum alignment expectation among all kernels (in units of bytes) + int const kMaximumAlignmentSize = 16; + + int alignment = gemm_problem_alignment( + M, N, K, + element_A, ptr_A, lda, 0, + element_B, ptr_B, ldb, 0, + element_C, ptr_C, ldc, 0, + ptr_D, ldd, 0, kMaximumAlignmentSize + ); + + // + // Find the best kernel in descending order of preference. 
+ // + + GemmPreferenceKey preference_key(compute_capability(), alignment); + + Operation const *operation = find_gemm_operation(operators_it, preference_key); + + if (!operation) { + return cutlass::Status::kErrorNotSupported; + } + + last_operation_ = operation; + + // + // Configure operation + // + + GemmConfiguration configuration{ + {M, N, K}, + lda, + ldb, + ldc, + ldd, + 1 + }; + + // Query host work space size + uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); + + if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + char host_workspace[kHostWorkspaceSize]; + + // Query device workspace size + uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); + + if (uint64_t(workspace_size_) < device_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + // Initialize host and device workspaces + Status status = operation->initialize( + &configuration, + host_workspace, + workspace_, + stream_); + + if (status != cutlass::Status::kSuccess) { + return status; + } + + // Run the operator + GemmArguments arguments{ + ptr_A, + ptr_B, + ptr_C, + ptr_D, + alpha, + beta, + scalar_pointer_mode_ + }; + + return operation->run(&arguments, host_workspace, workspace_, stream_); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Executes a GEMM computation: D <= alpha * A*B + beta * C. +// +// Supports batched-strided, batched array or split-K serial or split-K parallel. 
+// +Status Handle::gemm_universal( + + GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched + + int M, /// GEMM M dimension + int N, /// GEMM N dimension + int K, /// GEMM K dimension + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices + void const * ptr_A, /// Pointer to A matrix in Global Memory + int64_t lda, /// Leading dimension of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices + void const * ptr_B, /// Pointer to B matrix in Global Memory + int64_t ldb, /// Leading dimension of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C matrix + LayoutTypeID layout_C, /// Layout of D matrix + void const * ptr_C, /// Pointer to C matrix + int64_t ldc, /// Leading dimension of C matrix + + NumericTypeID element_D, /// Data type of D matrix + LayoutTypeID layout_D, /// Layout of D matrix + void * ptr_D, /// Pointer to D matrix + int64_t ldd, /// Leading dimension of D matrix + + int batch_count, /// Batch count or number of split-K slices + + int64_t batch_stride_A, /// Batch stride of A operand + int64_t batch_stride_B, /// Batch stride of B operand + int64_t batch_stride_C, /// Batch stride of C operand + int64_t batch_stride_D /// Batch stride of D operand +) { + + // + // Find the operation + // + + GemmFunctionalKey key( + provider_, + GemmKind::kUniversal, + element_compute, + element_scalar, + element_A, + layout_A, + 
transform_A, + element_B, + layout_B, + transform_B, + element_C, + layout_C, + element_D, + layout_D + ); + + auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); + + if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { + return cutlass::Status::kErrorNotSupported; + } + + if (operators_it->second.empty()) { + return cutlass::Status::kErrorNotSupported; + } + + // + // Compute the largest alignment restriction the kernel can satisfy. + // + + // Maximum alignment expectation among all kernels (in units of bytes) + int const kMaximumAlignmentSize = 16; + + void const *ptr_A_check = ptr_A; + void const *ptr_B_check = ptr_B; + void const *ptr_C_check = ptr_C; + void * ptr_D_check = ptr_D; + + // Ignore alignment of pointers to pointers. We can't check this from the host, + // as each batch index has its own pointer in device memory. + if (mode == GemmUniversalMode::kArray) { + ptr_A_check = nullptr; + ptr_B_check = nullptr; + ptr_C_check = nullptr; + ptr_D_check = nullptr; + } + + int alignment = gemm_problem_alignment( + M, N, K, + element_A, ptr_A_check, lda, 0, + element_B, ptr_B_check, ldb, 0, + element_C, ptr_C_check, ldc, 0, + ptr_D_check, ldd, 0, kMaximumAlignmentSize + ); + + // + // Find the best kernel in descending order of preference. 
+ // + + GemmPreferenceKey preference_key(compute_capability(), alignment); + + Operation const *operation = find_gemm_operation(operators_it, preference_key); + + if (!operation) { + return cutlass::Status::kErrorNotSupported; + } + + last_operation_ = operation; + + // + // Configure operation + // + + GemmUniversalConfiguration configuration{ + mode, + {M, N, K}, + batch_count, + lda, + ldb, + ldc, + ldd + }; + + // Query host work space size + uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); + + if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + char host_workspace[kHostWorkspaceSize]; + + GemmUniversalArguments arguments{ + {M, N, K}, + batch_count, + ptr_A, + ptr_B, + ptr_C, + ptr_D, + alpha, + beta, + scalar_pointer_mode_, + lda, + ldb, + ldc, + ldd, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D + }; + + // Query device workspace size + uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration, &arguments); + + if (uint64_t(workspace_size_) < device_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + // Initialize host and device workspaces + Status status = operation->initialize( + &configuration, + host_workspace, + workspace_, + stream_); + + if (status != cutlass::Status::kSuccess) { + return status; + } + + // Run the operator + + return operation->run(&arguments, host_workspace, workspace_, stream_); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Planar complex GEMM +Status Handle::gemm_planar_complex( + + int M, /// GEMM M dimension + int N, /// GEMM N dimension + int K, /// GEMM K dimension + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + 
NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix + + void const * ptr_A_real, /// Pointer to real part of A matrix + void const * ptr_A_imag, /// Pointer to imaginary part of A matrix + int64_t lda_real, /// Leading dimension of real part of A matrix + int64_t lda_imag, /// Leading dimension of imaginary part of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix + + void const * ptr_B_real, /// Pointer to real part of B matrix + void const * ptr_B_imag, /// Pointer to imaginary part of B matrix + int64_t ldb_real, /// Leading dimension of real part of B matrix + int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C and D matrix + + void const * ptr_C_real, /// Pointer to real part of C matrix + void const * ptr_C_imag, /// Pointer to imaginary part of C matrix + int64_t ldc_real, /// Leading dimension of real part of C matrix + int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix + + void * ptr_D_real, /// Pointer to real part of D matrix + void * ptr_D_imag, /// Pointer to imaginary part of D matrix + int64_t ldd_real, /// Leading dimension of real part of D matrix + int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix + + int batch_count, /// Number of batched GEMMs to execute + + int64_t batch_stride_A_real, + int64_t batch_stride_A_imag, + + int64_t batch_stride_B_real, + int64_t batch_stride_B_imag, + + int64_t batch_stride_C_real, + int64_t batch_stride_C_imag, + + int64_t batch_stride_D_real, + int64_t batch_stride_D_imag +) { + + // + // Find the operation + // + + GemmFunctionalKey key( + provider_, + GemmKind::kPlanarComplex, + 
element_compute, + element_scalar, + element_A, + layout_A, + transform_A, + element_B, + layout_B, + transform_B, + element_C, // C/D are same type + LayoutTypeID::kColumnMajor, + element_C, + LayoutTypeID::kColumnMajor + ); + + auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); + + if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { + return cutlass::Status::kErrorNotSupported; + } + + if (operators_it->second.empty()) { + return cutlass::Status::kErrorNotSupported; + } + + // + // Compute the largest alignment restriction the kernel can satisfy. + // + + // Maximum alignment expectation among all kernels (in units of bytes) + int const kMaximumAlignmentSize = 16; + + int alignment = std::max( + gemm_problem_alignment( + M, N, K, + element_A, ptr_A_real, lda_real, batch_stride_A_real, + element_B, ptr_B_real, ldb_real, batch_stride_B_real, + element_C, ptr_C_real, ldc_real, batch_stride_C_real, + ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize + ), + gemm_problem_alignment( + M, N, K, + element_A, ptr_A_imag, lda_imag, batch_stride_A_imag, + element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag, + element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag, + ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize + ) + ); + + // + // Find the best kernel in descending order of preference. 
+ // + + GemmPreferenceKey preference_key(compute_capability(), alignment); + + Operation const *operation = find_gemm_operation(operators_it, preference_key); + + if (!operation) { + return cutlass::Status::kErrorNotSupported; + } + + last_operation_ = operation; + + // + // Configure operation + // + + GemmPlanarComplexConfiguration configuration{ + GemmUniversalMode::kBatched, + {M, N, K}, + batch_count, + lda_real, + lda_imag, + ldb_real, + ldb_imag, + ldc_real, + ldc_imag, + ldd_real, + ldd_imag + }; + + // Query host work space size + uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); + + if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + char host_workspace[kHostWorkspaceSize]; + + // Query device workspace size + uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); + + if (uint64_t(workspace_size_) < device_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + // Initialize host and device workspaces + Status status = operation->initialize( + &configuration, + host_workspace, + workspace_, + stream_); + + if (status != cutlass::Status::kSuccess) { + return status; + } + + // Run the operator + GemmPlanarComplexArguments arguments{ + ptr_A_real, + ptr_A_imag, + ptr_B_real, + ptr_B_imag, + ptr_C_real, + ptr_C_imag, + ptr_D_real, + ptr_D_imag, + alpha, + beta, + scalar_pointer_mode_, + batch_stride_A_real, + batch_stride_A_imag, + batch_stride_B_real, + batch_stride_B_imag, + batch_stride_C_real, + batch_stride_C_imag, + batch_stride_D_real, + batch_stride_D_imag + }; + + return operation->run(&arguments, host_workspace, workspace_, stream_); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Planar complex batched GEMM loading pointers from arrays in global memory +Status Handle::gemm_planar_complex_array( + + int expected_M, /// 
Expected GEMM M dimension (used for sizing CUDA grid) + int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid) + int expected_K, /// Expected GEMM K dimension + int batch_count, /// Number of independent GEMM computations to execute + + int const *M, /// Array containing the GEMM M dimension for each batch index + int const *N, /// Array containing the GEMM N dimension for each batch index + int const *K, /// Array containing the GEMM K dimension for each batch index + + NumericTypeID element_compute, /// Data type of internal accumulation + + NumericTypeID element_scalar, /// Data type of alpha/beta scalars + + void const *alpha, /// Pointer to alpha scalar + + NumericTypeID element_A, /// Data type of A matrix elements + LayoutTypeID layout_A, /// Layout of A matrix + ComplexTransform transform_A, /// Complex transformation applied to A matrix + + void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices + void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices + + int64_t lda_real, /// Leading dimension of real part of A matrix + int64_t lda_imag, /// Leading dimension of imaginary part of A matrix + + NumericTypeID element_B, /// Data type of B matrix elements + LayoutTypeID layout_B, /// Layout of B matrix + ComplexTransform transform_B, /// Complex transformation applied to B matrix + + void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices + void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices + + int64_t ldb_real, /// Leading dimension of real part of B matrix + int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix + + void const * beta, /// Pointer to beta scalar + + NumericTypeID element_C, /// Data type of C and D matrix + + void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices + void 
const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices + + int64_t ldc_real, /// Leading dimension of real part of C matrix + int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix + + void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices + void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices + + int64_t ldd_real, /// Leading dimension of real part of D matrix + int64_t ldd_imag /// Leading dimension of imaginary part of D matrix +) { + + // + // Find the operation + // + + GemmFunctionalKey key( + provider_, + GemmKind::kPlanarComplexArray, + element_compute, + element_scalar, + element_A, + layout_A, + transform_A, + element_B, + layout_B, + transform_B, + element_C, // C/D are same type + LayoutTypeID::kColumnMajor, + element_C, + LayoutTypeID::kColumnMajor + ); + + auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); + + if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { + return cutlass::Status::kErrorNotSupported; + } + + if (operators_it->second.empty()) { + return cutlass::Status::kErrorNotSupported; + } + + // + // Compute the largest alignment restriction the kernel can satisfy. + // + + // Maximum alignment expectation among all kernels (in units of bytes) + int const kMaximumAlignmentSize = 16; + + int alignment = std::max( + gemm_problem_alignment( + expected_M, expected_N, expected_K, + element_A, nullptr, lda_real, 0, + element_B, nullptr, ldb_real, 0, + element_C, nullptr, ldc_real, 0, + nullptr, ldd_real, 0, kMaximumAlignmentSize + ), + gemm_problem_alignment( + expected_M, expected_N, expected_K, + element_A, nullptr, lda_imag, 0, + element_B, nullptr, ldb_imag, 0, + element_C, nullptr, ldc_imag, 0, + nullptr, ldd_imag, 0, kMaximumAlignmentSize + ) + ); + + // + // Find the best kernel in descending order of preference. 
+ // + + GemmPreferenceKey preference_key(compute_capability(), alignment); + + Operation const *operation = find_gemm_operation(operators_it, preference_key); + + if (!operation) { + return cutlass::Status::kErrorNotSupported; + } + + last_operation_ = operation; + + // + // Configure operation + // + + GemmPlanarComplexArrayConfiguration configuration{ + {expected_M, expected_N, expected_K}, + batch_count, + lda_real, + lda_imag, + ldb_real, + ldb_imag, + ldc_real, + ldc_imag, + ldd_real, + ldd_imag + }; + + // Query host work space size + uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); + + if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + char host_workspace[kHostWorkspaceSize]; + + // Query device workspace size + uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); + + if (uint64_t(workspace_size_) < device_workspace_size_needed) { + return cutlass::Status::kErrorNotSupported; + } + + // Initialize host and device workspaces + Status status = operation->initialize( + &configuration, + host_workspace, + workspace_, + stream_); + + if (status != cutlass::Status::kSuccess) { + return status; + } + + // Run the operator + GemmPlanarComplexArrayArguments arguments{ + M, N, K, + ptr_A_real, + ptr_A_imag, + ptr_B_real, + ptr_B_imag, + ptr_C_real, + ptr_C_imag, + ptr_D_real, + ptr_D_imag, + alpha, + beta, + scalar_pointer_mode_ + }; + + return operation->run(&arguments, host_workspace, workspace_, stream_); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace +Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) { + + ConvDescription const &conv_desc = + static_cast(operation->description()); + + // if the curren conv operation accumulator and 
output data type match return operation + if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) { + return operation; + } + + // find conv operation to match conv output and reduction workspace data type + ConvFunctionalKey key( + library::Provider::kCUTLASS, + conv_desc.conv_kind, + conv_desc.A.element, + conv_desc.A.layout, + conv_desc.B.element, + conv_desc.B.layout, + conv_desc.tile_description.math_instruction.element_accumulator, + conv_desc.C.layout, + conv_desc.tile_description.math_instruction.element_accumulator, + conv_desc.element_epilogue); + + // conv operation table for conv2d or conv3d + auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ? + Singleton::get().operation_table.conv2d_operations : + Singleton::get().operation_table.conv3d_operations; + + // find ConvFunctionalKey in convolution operation table + auto operators_it = conv_operations.find(key); + + if (operators_it == conv_operations.end()) { + return nullptr; + } + + if (operators_it->second.empty()) { + return nullptr; + } + + // conv operation for same compute capability and iterator algorithm + ConvPreferenceKey preference_key( + conv_desc.tile_description.minimum_compute_capability, + conv_desc.iterator_algorithm); + + auto it = operators_it->second.find(preference_key); + + if(it == operators_it->second.end()) { + return nullptr; + } + + // return matching conv opertion (same tile sizes and instruction) + for (auto op : it->second) { + if (op->description().tile_description == operation->description().tile_description) { + return op; + } + } + + return nullptr; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Finds gemm operation instances with Gemm::ElementC = Reduction::ElementWorkspace +Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation) { + + GemmDescription const &gemm_desc = + static_cast(operation->description()); + + // if the 
curren gemm operation accumulator and output data type match return operation + if(gemm_desc.tile_description.math_instruction.element_accumulator == gemm_desc.D.element) { + return operation; + } + + // find gemm operation to match gemm output and reduction workspace data type + GemmFunctionalKey key( + library::Provider::kCUTLASS, + gemm_desc.gemm_kind, + gemm_desc.tile_description.math_instruction.element_accumulator, + gemm_desc.element_epilogue, + gemm_desc.A.element, + gemm_desc.A.layout, + gemm_desc.transform_A, + gemm_desc.B.element, + gemm_desc.B.layout, + gemm_desc.transform_B, + gemm_desc.tile_description.math_instruction.element_accumulator, // C/D are same type + LayoutTypeID::kColumnMajor, + gemm_desc.tile_description.math_instruction.element_accumulator, + LayoutTypeID::kColumnMajor); + + // gemm operation table + auto gemm_operations = Singleton::get().operation_table.gemm_operations; + + // find ConvFunctionalKey in gemm operation table + auto operators_it = gemm_operations.find(key); + + if (operators_it == gemm_operations.end()) { + return nullptr; + } + + if (operators_it->second.empty()) { + return nullptr; + } + + // A and B uses the same alignment in the generator.py + int alignment = gemm_desc.A.alignment; + + // gemm operation for same compute capability and iterator algorithm + GemmPreferenceKey preference_key( + gemm_desc.tile_description.minimum_compute_capability, + alignment); + + return find_gemm_operation(operators_it, preference_key); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/library_internal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/library_internal.h new file mode 100644 index 
0000000000000000000000000000000000000000..4e4e09d2e1c0878861dd0c58093a130943a2ca10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/library_internal.h @@ -0,0 +1,368 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! + \file + + \brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS. + + Generally, + + description - compile-time constant parameters used to instantiate an operation + + configuration - runtime parameters with computationally expensive initialization + + arguments - runtime parameters that may be passed to an initialized operation with low + computational overhead +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/mma.h" +#include "cutlass/layout/matrix.h" + +#include "cutlass/library/library.h" +#include "cutlass/library/arch_mappings.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct NumericTypeMap; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kVoid; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kB1; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kS4; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kS8; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kS16; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kS32; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kS64; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kU4; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kU8; +}; + 
+template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kFE4M3; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kFE5M2; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kU16; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kU32; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kU64; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kF16; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kF32; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kF64; +}; + +template <> struct NumericTypeMap > { + static NumericTypeID const kId = NumericTypeID::kCF16; +}; + +template <> struct NumericTypeMap > { + static NumericTypeID const kId = NumericTypeID::kCF32; +}; + +template <> struct NumericTypeMap > { + static NumericTypeID const kId = NumericTypeID::kCF64; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kBF16; +}; + +template <> struct NumericTypeMap { + static NumericTypeID const kId = NumericTypeID::kTF32; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kInvalid; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAdd; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddFastBF16; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddFastF16; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddSaturate; +}; + +template <> 
struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddComplex; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddGaussianComplex; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kXorPopc; +}; + + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddFastF32; +}; + +template <> struct MathOperationMap { + static MathOperationID const kId = MathOperationID::kMultiplyAddComplexFastF32; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct LayoutMap; + +template <> struct LayoutMap { + static LayoutTypeID const kId = LayoutTypeID::kColumnMajor; +}; + +template <> struct LayoutMap { + static LayoutTypeID const kId = LayoutTypeID::kRowMajor; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK2; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK2; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK4; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK4; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK16; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK16; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK32; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK32; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK64; +}; + +template <> struct LayoutMap> { + static LayoutTypeID 
const kId = LayoutTypeID::kRowMajorInterleavedK64; +}; + +template <> struct LayoutMap { + static LayoutTypeID const kId = LayoutTypeID::kTensorNHWC; +}; + +template <> struct LayoutMap { + static LayoutTypeID const kId = LayoutTypeID::kTensorNDHWC; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kTensorNC32HW32; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kTensorNC64HW64; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kTensorC32RSK32; +}; + +template <> struct LayoutMap> { + static LayoutTypeID const kId = LayoutTypeID::kTensorC64RSK64; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct OpcodeClassMap; + +template <> struct OpcodeClassMap { + static OpcodeClassID const kId = OpcodeClassID::kSimt; +}; + +template <> struct OpcodeClassMap { + static OpcodeClassID const kId = OpcodeClassID::kTensorOp; +}; + +template <> struct OpcodeClassMap { + static OpcodeClassID const kId = OpcodeClassID::kWmmaTensorOp; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct ComplexTransformMap; + +template <> struct ComplexTransformMap { + static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kNone; +}; + +template <> struct ComplexTransformMap { + static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kConjugate; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template struct ConvModeMap; + +template <> struct ConvModeMap { + static ConvModeID const kId = ConvModeID::kCrossCorrelation; +}; + +template <> struct ConvModeMap { + static ConvModeID const kId = ConvModeID::kConvolution; +}; + + +template struct ConvKindMap; + +template <> struct ConvKindMap { + static ConvKind const kId = 
ConvKind::kFprop; +}; + +template <> struct ConvKindMap { + static ConvKind const kId = ConvKind::kDgrad; +}; + +template <> struct ConvKindMap { + static ConvKind const kId = ConvKind::kWgrad; +}; + + +template struct IteratorAlgorithmMap; + +template <> struct IteratorAlgorithmMap { + static IteratorAlgorithmID const kId = IteratorAlgorithmID::kAnalytic; +}; + +template <> struct IteratorAlgorithmMap { + static IteratorAlgorithmID const kId = IteratorAlgorithmID::kOptimized; +}; + +template <> struct IteratorAlgorithmMap { + static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFixedChannels; +}; + +template <> struct IteratorAlgorithmMap { + static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFewChannels; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +TensorDescription make_TensorDescription(int alignment = 1) { + TensorDescription desc; + + desc.element = NumericTypeMap::kId; + desc.layout = LayoutMap::kId; + desc.alignment = alignment; + desc.log_extent_range = int(sizeof(typename Layout::TensorCoord::Index) - 1) * 8; + desc.log_stride_range = int(sizeof(typename Layout::Stride::Index) - 1) * 8; + + return desc; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/manifest.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/manifest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1f3c456ab5caab8006ae98d5fc62156ad075f032 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/manifest.cpp @@ -0,0 +1,105 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Manifest of CUTLASS Library + + This is the root of the data structure containing CUTLASS objects +*/ + +#include +#include "cutlass/library/manifest.h" + +namespace cutlass { +namespace library { + +////////////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_reference_operations(Manifest &manifest); + +////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Top-level initialization +Status Manifest::initialize() { + + if (!operations_.empty()) { + operations_.clear(); + } + + // initialize procedurally generated cutlass op in manifest object + initialize_all(*this); + + // initialize manually instanced reference op in manifest object + initialize_reference_operations(*this); + + // initialize manually instanced reduction reference op in manifest object + initialize_all_reduction_op(*this); + + return Status::kSuccess; +} + +/// Used for initialization +void Manifest::reserve(size_t operation_count) { + operations_.reserve(operation_count); +} + +/// Graceful shutdown +Status Manifest::release() { + operations_.clear(); + return Status::kSuccess; +} + +/// Appends an operation and takes ownership +void Manifest::append(Operation *operation_ptr) { + operations_.emplace_back(operation_ptr); +} + +/// Returns an iterator to the first operation +OperationVector const & Manifest::operations() const { + return operations_; +} + +/// Returns a const iterator +OperationVector::const_iterator Manifest::begin() const { + return operations_.begin(); +} + +/// Returns a const iterator +OperationVector::const_iterator Manifest::end() const { + return operations_.end(); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/operation_table.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/operation_table.cu new file mode 100644 index 0000000000000000000000000000000000000000..113e48d20d67af15261bf459018d952f89f3ee19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/operation_table.cu @@ -0,0 +1,146 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* + \file + \brief Defines a data structure in which a set of functionally equivalent library::Operation + instances may be queried. +*/ + +#include "cutlass/library/operation_table.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +void OperationTable::append(Manifest const &manifest) { + + // Insert operations into appropriate data structure + for (auto const & operation : manifest) { + + OperationDescription const &desc = operation->description(); + + // insert all gemm operation into operation table + if (desc.kind == OperationKind::kGemm) { + GemmDescription const &gemm_desc = static_cast(desc); + + + GemmFunctionalKey functional_key( + gemm_desc.provider, + gemm_desc.gemm_kind, + gemm_desc.tile_description.math_instruction.element_accumulator, + gemm_desc.element_epilogue, + gemm_desc.A.element, + gemm_desc.A.layout, + gemm_desc.transform_A, + gemm_desc.B.element, + gemm_desc.B.layout, + gemm_desc.transform_B, + gemm_desc.C.element, + gemm_desc.C.layout, + gemm_desc.D.element, + gemm_desc.D.layout + ); + + Operation const *op = operation.get(); + + int cc = gemm_desc.tile_description.minimum_compute_capability; + + int 
alignment = std::max(std::max( + gemm_desc.A.alignment, gemm_desc.B.alignment), gemm_desc.C.alignment); + + GemmPreferenceKey preference_key(cc, alignment); + + gemm_operations[functional_key][preference_key].push_back(op); + } + + // insert all conv2d or conv3d operation into operation table + if (desc.kind == OperationKind::kConv2d || desc.kind == OperationKind::kConv3d) { + auto &conv_desc = static_cast(desc); + + ConvFunctionalKey functional_key( + conv_desc.provider, + conv_desc.conv_kind, + conv_desc.A.element, + conv_desc.A.layout, + conv_desc.B.element, + conv_desc.B.layout, + conv_desc.C.element, + conv_desc.C.layout, + conv_desc.tile_description.math_instruction.element_accumulator, + conv_desc.element_epilogue + ); + + Operation const *op = operation.get(); + + int cc = conv_desc.tile_description.minimum_compute_capability; + + ConvPreferenceKey preference_key(cc, conv_desc.iterator_algorithm); + + // insert conv operation to conv2d_operations or conv3d_operations map + (desc.kind == OperationKind::kConv2d) ? 
+ conv2d_operations[functional_key][preference_key].push_back(op) : + conv3d_operations[functional_key][preference_key].push_back(op); + } + + // insert all reduction operation into operation table + if (desc.kind == OperationKind::kReduction) { + auto &reduce_desc = static_cast(desc); + + ReductionFunctionalKey functional_key( + reduce_desc.provider, + reduce_desc.element_workspace, + reduce_desc.tile_description.math_instruction.element_accumulator, + reduce_desc.element_output, + reduce_desc.element_epilogue, + library::MathOperationID::kAdd, + library::EpilogueKind::kLinearCombination + ); + + Operation const *op = operation.get(); + + reduction_operations[functional_key] = op; + + } + + } + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/rank_2k_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/rank_2k_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..901f2ea651a76de9f5ffbd1074ffbba1f17fcd49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/rank_2k_operation.h @@ -0,0 +1,373 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all Rank 2K operation kinds (Syr2k, Her2k) + in CUTLASS Library. 
+ + +*/ + +#pragma once +#include +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/rank_2k.h" +#include "cutlass/gemm/kernel/default_rank_2k_universal.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" +#include "cutlass/core_io.h" +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class Rank2KOperationBase : public Operation { +public: + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static BlasMode const kBlasMode = Operator::kBlasMode; + static int const kUpdateRank = Operator::kUpdateRank; + static FillMode const kFillModeC = Operator::kFillModeC; + + using OperatorArguments = typename Operator::Arguments; + +protected: + + /// + RankKDescription description_; + +public: + + /// Constructor + Rank2KOperationBase(char const *name = "unknown_rank_k") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.rank_k_kind = RankKKind::kUniversal; + description_.fill_mode = kFillModeC; + description_.blas_mode = kBlasMode; + description_.num_ranks = kUpdateRank; + + description_.kind = OperationKind::kRank2K; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + 
description_.tile_description.warp_count = make_Coord( + Operator::Rank2Kkernel::WarpCount::kM, + Operator::Rank2Kkernel::WarpCount::kN, + Operator::Rank2Kkernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(Operator::kAlignmentA); + description_.B = make_TensorDescription(Operator::kAlignmentB); + description_.C = make_TensorDescription(Operator::kAlignmentC); + description_.element_epilogue = NumericTypeMap::kId; + + description_.split_k_mode = SplitKMode::kNone; + description_.transform_A = ComplexTransformMap::kId; + description_.transform_B = ComplexTransformMap::kId; + } + + /// Returns the description of the SYRK operation + virtual OperationDescription const & description() const { + return description_; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class Rank2KOperation : public Rank2KOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename 
Operator::EpilogueOutputOp::ElementCompute; + + static BlasMode const kBlasMode = Operator::kBlasMode; + static int const kUpdateRank = Operator::kUpdateRank; + static FillMode const kFillModeC = Operator::kFillModeC; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + Rank2KOperation(char const *name = "unknown_rank_2k"): + Rank2KOperationBase(name) { + + this->description_.rank_k_kind = RankKKind::kUniversal; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + RankKConfiguration const *configuration) { + + //operator_args.mode = configuration->mode; + + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + operator_args.lda = int(configuration->lda); + operator_args.ldb = int(configuration->ldb); + operator_args.ldc = int(configuration->ldc); + operator_args.ldd = int(configuration->ldd); + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + RankKArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A = arguments->A; + operator_args.ptr_B = arguments->B; + operator_args.ptr_C = arguments->C; + operator_args.ptr_D = arguments->D; + + operator_args.batch_stride_A = arguments->batch_stride_A; 
+ operator_args.batch_stride_B = arguments->batch_stride_B; + operator_args.batch_stride_C = arguments->batch_stride_C; + operator_args.batch_stride_D = arguments->batch_stride_D; + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + RankKConfiguration const *configuration = + static_cast(configuration_ptr); + + RankKArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + //std::cout << "initialize() library::Rank2KOperation" << std::endl; + //print_operator_args(args); + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs 
the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args, device_workspace); + + if (status != Status::kSuccess) { + return status; + } + + //std::cout << "run() library::Rank2KOperation" << std::endl; + //print_operator_args(args); + status = op->run(stream); + + return status; + } + + /// Call print_operator_args from the Conv2dOperation::initialize() + // to dump arguments passed on to cutlass operator for debugging + void print_operator_args(OperatorArguments &operator_args) const { + std::cout << "Rank2KOperation::OperatorArguments" << std::endl + << " problem_size:" << std::endl + << operator_args.problem_size << std::endl + << " epilogue (alpha, beta): " + << operator_args.epilogue.alpha << ", " + << operator_args.epilogue.beta << std::endl + << " ref_A (ptr, {stride}): " + << operator_args.ptr_A << ", {" + << operator_args.lda << "}" << std::endl + << " ref_B (ptr, {stride}): " + << operator_args.ptr_B << ", {" + << operator_args.ldb << "}" << std::endl + << " ref_C (ptr, {stride}): " + << operator_args.ptr_C << ", {" + << operator_args.ldc << "}" << std::endl + << " ref_D (ptr, {stride}): " + << operator_args.ptr_D << ", {" + << operator_args.ldd << "}" << std::endl; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/rank_k_operation.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/rank_k_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..2eb7a2dcaedc9ccc787375fbf1047c2c0119ff1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/rank_k_operation.h @@ -0,0 +1,344 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all Rank K operation kinds (Syrk, Herk) + in CUTLASS Library. + + +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/rank_k.h" +#include "cutlass/gemm/kernel/default_rank_k_universal.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class RankKOperationBase : public Operation { +public: + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementA; + using LayoutB = typename Operator::LayoutA; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static BlasMode const kBlasMode = Operator::kBlasMode; + static int const kUpdateRank = Operator::kUpdateRank; + static FillMode const kFillModeC = Operator::kFillModeC; + + using OperatorArguments = typename Operator::Arguments; + +protected: 
+ + /// + RankKDescription description_; + +public: + + /// Constructor + RankKOperationBase(char const *name = "unknown_rank_k") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.rank_k_kind = RankKKind::kUniversal; + description_.fill_mode = kFillModeC; + description_.blas_mode = kBlasMode; + description_.num_ranks = kUpdateRank; + + description_.kind = OperationKind::kRankK; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = make_Coord( + Operator::RankKkernel::WarpCount::kM, + Operator::RankKkernel::WarpCount::kN, + Operator::RankKkernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(Operator::kAlignmentA); + description_.B = make_TensorDescription(Operator::kAlignmentA); + description_.C = make_TensorDescription(Operator::kAlignmentC); + description_.element_epilogue = NumericTypeMap::kId; + + description_.split_k_mode = SplitKMode::kNone; + description_.transform_A = ComplexTransformMap::kId; + description_.transform_B = ComplexTransformMap::kId; + } + + /// Returns the description of the SYRK operation + virtual OperationDescription const & description() 
const { + return description_; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class RankKOperation : public RankKOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementA; + using LayoutB = typename Operator::LayoutA; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + static BlasMode const kBlasMode = Operator::kBlasMode; + static int const kUpdateRank = Operator::kUpdateRank; + static FillMode const kFillModeC = Operator::kFillModeC; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + RankKOperation(char const *name = "unknown_rank_k"): + RankKOperationBase(name) { + + this->description_.rank_k_kind = RankKKind::kUniversal; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + RankKConfiguration const *configuration) { + + //operator_args.mode = configuration->mode; + + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + operator_args.lda = int(configuration->lda); + operator_args.ldb = int(configuration->lda); + operator_args.ldc = int(configuration->ldc); + operator_args.ldd = int(configuration->ldd); + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + RankKArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params 
params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A = arguments->A; + operator_args.ptr_C = arguments->C; + operator_args.ptr_D = arguments->D; + + operator_args.batch_stride_A = arguments->batch_stride_A; + operator_args.batch_stride_C = arguments->batch_stride_C; + operator_args.batch_stride_D = arguments->batch_stride_D; + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + RankKConfiguration const *configuration = + static_cast(configuration_ptr); + + RankKArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void 
const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = op->update(args, device_workspace); + + if (status != Status::kSuccess) { + return status; + } + + status = op->run(stream); + + return status; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/init_reduction_operations.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/init_reduction_operations.cu new file mode 100644 index 0000000000000000000000000000000000000000..2d14166b13ae2a1127b22d3131230af99e79a009 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/init_reduction_operations.cu @@ -0,0 +1,66 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Initialize operations for reduction operation in CUTLASS Library. 
+ +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +namespace cutlass { +namespace library { +/////////////////////////////////////////////////////////////////////////////////////////////// +// CUTLASS Reduction Instances // +/////////////////////////////////////////////////////////////////////////////////////////////// +void initialize_reduce_add_linear_combination_f16_f16_f16(Manifest &manifest); +void initialize_reduce_add_linear_combination_f32_f32_f16(Manifest &manifest); +void initialize_reduce_add_linear_combination_f32_f32_f32(Manifest &manifest); +void initialize_reduce_add_linear_combination_f64_f64_f64(Manifest &manifest); +void initialize_reduce_add_linear_combination_cf32_cf32_cf32(Manifest &manifest); + +// +// Entry point to construct operations +// +void initialize_all_reduction_op(Manifest &manifest) { + + initialize_reduce_add_linear_combination_f16_f16_f16(manifest); + initialize_reduce_add_linear_combination_f32_f32_f16(manifest); + initialize_reduce_add_linear_combination_f32_f32_f32(manifest); + initialize_reduce_add_linear_combination_f64_f64_f64(manifest); + initialize_reduce_add_linear_combination_cf32_cf32_cf32(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/reduction_device.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/reduction_device.cu new file mode 100644 index 0000000000000000000000000000000000000000..5ede2fdfae5530eee468500eabbf323071800811 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/reduction_device.cu @@ -0,0 +1,219 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 
2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for reduction operation in CUTLASS Library. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "reduction_operation.h" + +namespace cutlass { +namespace library { + +// naming convention initialize_reduce_[ReductionOp]_[EpilogueOp]_[ElementWorkspace]_[ElementAccumulator]_[ElementOutput] + + +void initialize_reduce_add_linear_combination_f16_f16_f16(Manifest &manifest) { + + using ElementWorkspace = cutlass::half_t; + using ElementAccumulator = cutlass::half_t; + using ElementOutput = cutlass::half_t; + using ElementCompute = cutlass::half_t; + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + typename EpilogueOutputOp::ElementAccumulator, + EpilogueOutputOp::kCount + >; + + using Operation_reduce_add_linear_combination_f16_f16_f16 = cutlass::reduction::device::ReduceSplitK< + cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, + EpilogueOutputOp, + ReductionOp + > + >; + + manifest.append(new ReductionOperation< + Operation_reduce_add_linear_combination_f16_f16_f16>( + "reduce_add_linear_combination_f16_f16_f16" + )); +} + +void initialize_reduce_add_linear_combination_f32_f32_f16(Manifest &manifest) { + + using ElementWorkspace = float; + using ElementAccumulator = float; + using ElementOutput = cutlass::half_t; + using ElementCompute = float; + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + typename EpilogueOutputOp::ElementAccumulator, + EpilogueOutputOp::kCount + >; + + using Operation_reduce_add_linear_combination_f32_f32_f16 = cutlass::reduction::device::ReduceSplitK< + 
cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, + EpilogueOutputOp, + ReductionOp + > + >; + + manifest.append(new ReductionOperation< + Operation_reduce_add_linear_combination_f32_f32_f16>( + "reduce_add_linear_combination_f32_f32_f16" + )); +} + + +void initialize_reduce_add_linear_combination_f32_f32_f32(Manifest &manifest) { + + using ElementWorkspace = float; + using ElementAccumulator = float; + using ElementOutput = float; + using ElementCompute = float; + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + typename EpilogueOutputOp::ElementAccumulator, + EpilogueOutputOp::kCount + >; + + using Operation_reduce_add_linear_combination_f32_f32_f32 = cutlass::reduction::device::ReduceSplitK< + cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, + EpilogueOutputOp, + ReductionOp + > + >; + + manifest.append(new ReductionOperation< + Operation_reduce_add_linear_combination_f32_f32_f32>( + "reduce_add_linear_combination_f32_f32_f32" + )); +} + +void initialize_reduce_add_linear_combination_f64_f64_f64(Manifest &manifest) { + + using ElementWorkspace = double; + using ElementAccumulator = double; + using ElementOutput = double; + using ElementCompute = double; + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + typename EpilogueOutputOp::ElementAccumulator, + EpilogueOutputOp::kCount + >; + + using Operation_reduce_add_linear_combination_f64_f64_f64 = cutlass::reduction::device::ReduceSplitK< + cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<4, 
32 * EpilogueOutputOp::kCount>, + EpilogueOutputOp, + ReductionOp + > + >; + + manifest.append(new ReductionOperation< + Operation_reduce_add_linear_combination_f64_f64_f64>( + "reduce_add_linear_combination_f64_f64_f64" + )); +} + +void initialize_reduce_add_linear_combination_cf32_cf32_cf32(Manifest &manifest) { + + using ElementWorkspace = cutlass::complex; + using ElementAccumulator = cutlass::complex; + using ElementOutput = cutlass::complex; + using ElementCompute = cutlass::complex; + + using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, + 128 / cutlass::sizeof_bits::value, + ElementAccumulator, + ElementCompute + >; + + using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ElementAccumulator, + typename EpilogueOutputOp::ElementAccumulator, + EpilogueOutputOp::kCount + >; + + using Operation_reduce_add_linear_combination_cf32_cf32_cf32 = cutlass::reduction::device::ReduceSplitK< + cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, + EpilogueOutputOp, + ReductionOp + > + >; + + manifest.append(new ReductionOperation< + Operation_reduce_add_linear_combination_cf32_cf32_cf32>( + "reduce_add_linear_combination_cf32_cf32_cf32" + )); +} + +} +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/reduction_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/reduction_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..4a6909ccbf544e950c0723eda044213aadaf61b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reduction/reduction_operation.h @@ -0,0 +1,290 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for reduction operation in CUTLASS Library. 
+*/ + +#pragma once +#include +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/linear_combination_clamp.h" +#include "cutlass/reduction/thread/reduction_operators.h" +#include "cutlass/reduction/device/reduce_split_k.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" +#include "cutlass/core_io.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class ReductionOperation : public Operation { +public: + using Operator = Operator_; + + using ElementWorkspace = typename Operator::ElementWorkspace; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementOutput = typename Operator::ElementOutput; + + using ElementCompute = typename Operator::OutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +protected: + + /// + ReductionDescription description_; + +public: + + /// Constructor + ReductionOperation(char const *name = "unknown_reduction") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.kind = OperationKind::kReduction; + + description_.tile_description.threadblock_shape = make_Coord(Operator::Shape::kRow, Operator::Shape::kColumn, 1); + + description_.tile_description.math_instruction.instruction_shape = make_Coord(1, 1, 1); + description_.tile_description.math_instruction.element_accumulator = NumericTypeMap::kId; + description_.tile_description.math_instruction.opcode_class = OpcodeClassID::kSimt; + description_.tile_description.math_instruction.math_operation = MathOperationID::kAdd; + + description_.tile_description.minimum_compute_capability = 50; + description_.tile_description.maximum_compute_capability = 1024; + + description_.element_workspace = 
NumericTypeMap::kId; + description_.element_output = NumericTypeMap::kId; + description_.element_epilogue = NumericTypeMap::kId; + + } + + /// Returns the description of the Reduction operation + virtual OperationDescription const & description() const { + return description_; + } + + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + ReductionConfiguration const *configuration) { + + operator_args.problem_size = configuration->problem_size; + operator_args.partitions = configuration->partitions; + operator_args.partition_stride = configuration->partition_stride; + + operator_args.workspace = {nullptr, int(configuration->ldw)}; + operator_args.source = {nullptr, int(configuration->lds)}; + operator_args.destination = {nullptr, int(configuration->ldd)}; + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + ReductionArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::OutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.output = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::OutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.output = params; + } + else { + return Status::kErrorInvalidProblem; + } + + operator_args.workspace.reset(static_cast(const_cast(arguments->workspace))); + operator_args.source.reset(static_cast(const_cast(arguments->source))); + operator_args.destination.reset(static_cast(const_cast(arguments->destination))); + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + 
void const *arguments_ptr) const { + + ReductionConfiguration const *configuration = + static_cast(configuration_ptr); + + ReductionArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + return Operator::get_workspace_size(args); + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + //std::cout << "initialize library::Reduction" << std::endl; + //print_operator_args(args); + return op->initialize(args, device_workspace, stream); + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + status = 
op->update(args, device_workspace); + + if (status != Status::kSuccess) { + return status; + } + + //std::cout << "run library::Reduction" << std::endl; + //print_operator_args(args); + return op->run(stream); + } + + /// Call print_operator_args from the Reduction::initialize() + // to dump arguments passed on to cutlass operator for debugging + void print_operator_args(OperatorArguments &operator_args) const { + std::cout << "Reduction::OperatorArguments" << std::endl + << " problem_size: " + << operator_args.problem_size << std::endl + << " partitions: " + << operator_args.partitions << std::endl + << " partition_stride: " + << operator_args.partition_stride << std::endl + << " epilogue (alpha, beta): " + << operator_args.output.alpha << ", " + << operator_args.output.beta << std::endl + << " workspace (ptr, stride): " + << operator_args.workspace.data() << ", " + << operator_args.workspace.stride(0) << std::endl + << " source (ptr, stride): " + << operator_args.source.data() << ", " + << operator_args.source.stride(0) << std::endl + << " destination (ptr, stride): " + << operator_args.destination.data() << ", " + << operator_args.destination.stride(0) << std::endl; + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv2d.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv2d.cu new file mode 100644 index 0000000000000000000000000000000000000000..715e3b0dd763c1d602b3aecbd61a2db84097ff5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv2d.cu @@ -0,0 +1,229 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief + +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "conv_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_conv2d_reference_operations(Manifest &manifest) { + + make_conv_all< + 2, + cutlass::half_t, cutlass::layout::TensorNHWC, + cutlass::half_t, cutlass::layout::TensorNHWC, + cutlass::half_t, cutlass::layout::TensorNHWC, + cutlass::half_t, + cutlass::half_t + >(manifest); + + make_conv_all< + 2, + cutlass::half_t, cutlass::layout::TensorNHWC, + cutlass::half_t, cutlass::layout::TensorNHWC, + cutlass::half_t, cutlass::layout::TensorNHWC, + float, + float + >(manifest); + + make_conv_all< + 2, + cutlass::half_t, cutlass::layout::TensorNHWC, + cutlass::half_t, cutlass::layout::TensorNHWC, + float, cutlass::layout::TensorNHWC, + float, + float + >(manifest); + + make_conv_all< + 2, + cutlass::bfloat16_t, cutlass::layout::TensorNHWC, + cutlass::bfloat16_t, cutlass::layout::TensorNHWC, + cutlass::bfloat16_t, cutlass::layout::TensorNHWC, + float, + float + >(manifest); + + make_conv_all< + 2, + cutlass::bfloat16_t, cutlass::layout::TensorNHWC, + cutlass::bfloat16_t, cutlass::layout::TensorNHWC, + float, cutlass::layout::TensorNHWC, + float, + float + >(manifest); + + make_conv_all< + 2, + cutlass::tfloat32_t, cutlass::layout::TensorNHWC, + cutlass::tfloat32_t, cutlass::layout::TensorNHWC, + cutlass::tfloat32_t, cutlass::layout::TensorNHWC, + float, + float + >(manifest); + + make_conv_all< + 2, + cutlass::tfloat32_t, cutlass::layout::TensorNHWC, + cutlass::tfloat32_t, cutlass::layout::TensorNHWC, + float, cutlass::layout::TensorNHWC, + 
float, + float + >(manifest); + + make_conv_all< + 2, + float, cutlass::layout::TensorNHWC, + float, cutlass::layout::TensorNHWC, + float, cutlass::layout::TensorNHWC, + float, + float + >(manifest); + + make_conv_all< + 2, + cutlass::complex, cutlass::layout::TensorNHWC, + cutlass::complex, cutlass::layout::TensorNHWC, + cutlass::complex, cutlass::layout::TensorNHWC, + cutlass::complex, + cutlass::complex + >(manifest); + + make_conv_fprop< + 2, + int8_t, cutlass::layout::TensorNHWC, + int8_t, cutlass::layout::TensorNHWC, + int32_t, cutlass::layout::TensorNHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + int8_t, cutlass::layout::TensorNHWC, + int8_t, cutlass::layout::TensorNHWC, + int8_t, cutlass::layout::TensorNHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + uint8_t, cutlass::layout::TensorNHWC, + uint8_t, cutlass::layout::TensorNHWC, + uint8_t, cutlass::layout::TensorNHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + uint8_t, cutlass::layout::TensorNHWC, + uint8_t, cutlass::layout::TensorNHWC, + int32_t, cutlass::layout::TensorNHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + uint8_t, cutlass::layout::TensorNHWC, + uint8_t, cutlass::layout::TensorNHWC, + int8_t, cutlass::layout::TensorNHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + cutlass::int4b_t, cutlass::layout::TensorNHWC, + cutlass::int4b_t, cutlass::layout::TensorNHWC, + int32_t, cutlass::layout::TensorNHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + cutlass::int4b_t, cutlass::layout::TensorNHWC, + cutlass::int4b_t, cutlass::layout::TensorNHWC, + cutlass::int4b_t, cutlass::layout::TensorNHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + cutlass::uint4b_t, cutlass::layout::TensorNHWC, + 
cutlass::uint4b_t, cutlass::layout::TensorNHWC, + int32_t, cutlass::layout::TensorNHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 2, + cutlass::uint4b_t, cutlass::layout::TensorNHWC, + cutlass::uint4b_t, cutlass::layout::TensorNHWC, + cutlass::uint4b_t, cutlass::layout::TensorNHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv3d.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv3d.cu new file mode 100644 index 0000000000000000000000000000000000000000..a0f9069433300830eb2c92b7233d1565fdb41584 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv3d.cu @@ -0,0 +1,209 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "conv_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_conv3d_reference_operations(Manifest &manifest) { + + make_conv_all< + 3, + cutlass::half_t, cutlass::layout::TensorNDHWC, + cutlass::half_t, cutlass::layout::TensorNDHWC, + cutlass::half_t, cutlass::layout::TensorNDHWC, + cutlass::half_t, + cutlass::half_t + >(manifest); + + make_conv_all< + 3, + cutlass::half_t, cutlass::layout::TensorNDHWC, + cutlass::half_t, cutlass::layout::TensorNDHWC, + cutlass::half_t, cutlass::layout::TensorNDHWC, + float, + 
float + >(manifest); + + make_conv_all< + 3, + cutlass::half_t, cutlass::layout::TensorNDHWC, + cutlass::half_t, cutlass::layout::TensorNDHWC, + float, cutlass::layout::TensorNDHWC, + float, + float + >(manifest); + + make_conv_all< + 3, + cutlass::bfloat16_t, cutlass::layout::TensorNDHWC, + cutlass::bfloat16_t, cutlass::layout::TensorNDHWC, + cutlass::bfloat16_t, cutlass::layout::TensorNDHWC, + float, + float + >(manifest); + + make_conv_all< + 3, + cutlass::bfloat16_t, cutlass::layout::TensorNDHWC, + cutlass::bfloat16_t, cutlass::layout::TensorNDHWC, + float, cutlass::layout::TensorNDHWC, + float, + float + >(manifest); + + make_conv_all< + 3, + cutlass::tfloat32_t, cutlass::layout::TensorNDHWC, + cutlass::tfloat32_t, cutlass::layout::TensorNDHWC, + cutlass::tfloat32_t, cutlass::layout::TensorNDHWC, + float, + float + >(manifest); + + make_conv_all< + 3, + cutlass::tfloat32_t, cutlass::layout::TensorNDHWC, + cutlass::tfloat32_t, cutlass::layout::TensorNDHWC, + float, cutlass::layout::TensorNDHWC, + float, + float + >(manifest); + + make_conv_all< + 3, + float, cutlass::layout::TensorNDHWC, + float, cutlass::layout::TensorNDHWC, + float, cutlass::layout::TensorNDHWC, + float, + float + >(manifest); + + make_conv_fprop< + 3, + int8_t, cutlass::layout::TensorNDHWC, + int8_t, cutlass::layout::TensorNDHWC, + int32_t, cutlass::layout::TensorNDHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + int8_t, cutlass::layout::TensorNDHWC, + int8_t, cutlass::layout::TensorNDHWC, + int8_t, cutlass::layout::TensorNDHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + uint8_t, cutlass::layout::TensorNDHWC, + uint8_t, cutlass::layout::TensorNDHWC, + int32_t, cutlass::layout::TensorNDHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + uint8_t, cutlass::layout::TensorNDHWC, + uint8_t, cutlass::layout::TensorNDHWC, + int8_t, cutlass::layout::TensorNDHWC, + float, 
+ int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + cutlass::int4b_t, cutlass::layout::TensorNDHWC, + cutlass::int4b_t, cutlass::layout::TensorNDHWC, + int32_t, cutlass::layout::TensorNDHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + cutlass::int4b_t, cutlass::layout::TensorNDHWC, + cutlass::int4b_t, cutlass::layout::TensorNDHWC, + cutlass::int4b_t, cutlass::layout::TensorNDHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + cutlass::uint4b_t, cutlass::layout::TensorNDHWC, + cutlass::uint4b_t, cutlass::layout::TensorNDHWC, + int32_t, cutlass::layout::TensorNDHWC, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_conv_fprop< + 3, + cutlass::uint4b_t, cutlass::layout::TensorNDHWC, + cutlass::uint4b_t, cutlass::layout::TensorNDHWC, + cutlass::uint4b_t, cutlass::layout::TensorNDHWC, + float, + int32_t, + NumericConverterClamp + >(manifest); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv_reference_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv_reference_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..4eac5deb97c8dad8cd761b3d97ea538df19c2680 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/conv_reference_operation.h @@ -0,0 +1,633 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines operations for all CONV operation kinds in CUTLASS Library +*/ + +#pragma once + +#include +#include +#include + +#include "cutlass/cutlass.h" + +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/util.h" +#include "library_internal.h" + +#include "cutlass/util/reference/host/convolution.h" +#include "cutlass/util/reference/device/convolution.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + Provider kProvider, + conv::Operator ConvolutionalOperator, + int ConvDim, + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename LayoutB_, + typename ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +struct ConvReferenceDispatcher; + +/// Dispatcher for Conv2d (partially specialized for kConvDim == 2) +template < + Provider kProvider, + conv::Operator kConvolutionalOperator, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator, + typename ConvertOp, + typename InnerProductOp +> +struct ConvReferenceDispatcher< + kProvider, + kConvolutionalOperator, + 2, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp> { + + static Status dispatch( + void const *configuration, + ElementA *ptr_A, + ElementB *ptr_B, + ElementC *ptr_C, + ElementC *ptr_D, + ElementCompute alpha, + ElementCompute beta, + 
cudaStream_t stream = nullptr + ) { + + Conv2dConfiguration const &config = + *static_cast(configuration); + + // TODO: make below code more general. It is fixed for NHWC now. + layout::TensorNHWC layout_a; + layout::TensorNHWC layout_b; + layout::TensorNHWC layout_c; + + layout_a.stride() = + make_Coord(int32_t(config.stride_a[0]), + int32_t(config.stride_a[1]), + int32_t(config.stride_a[2])); + + layout_b.stride() = + make_Coord(int32_t(config.stride_b[0]), + int32_t(config.stride_b[1]), + int32_t(config.stride_b[2])); + + layout_c.stride() = + make_Coord(int32_t(config.stride_c[0]), + int32_t(config.stride_c[1]), + int32_t(config.stride_c[2])); + + if (kProvider == Provider::kReferenceHost) { + + cutlass::reference::host::Conv2d< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC , + LayoutC, + ElementCompute, + ElementAccumulator, + ElementC, + ConvertOp, + InnerProductOp + >( + kConvolutionalOperator, + config.problem_size, + {ptr_A, layout_a}, + {ptr_B, layout_b}, + {ptr_C, layout_c}, + {ptr_D, layout_c}, + alpha, + beta + ); + + return Status::kSuccess; + } + else if (kProvider == Provider::kReferenceDevice) { + return cutlass::reference::device::Conv2d< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp + >( + kConvolutionalOperator, + config.problem_size, + {ptr_A, layout_a}, + {ptr_B, layout_b}, + {ptr_C, layout_c}, + {ptr_D, layout_c}, + alpha, + beta, + stream + ); + } + return Status::kErrorNotSupported; + } +}; + +/// Dispatcher for Conv3d (partially specialized for kConvDim == 3) +template < + Provider kProvider, + conv::Operator kConvolutionalOperator, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator, + typename ConvertOp, + typename InnerProductOp +> +struct ConvReferenceDispatcher< + kProvider, + kConvolutionalOperator, + 3, + 
ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp> { + + static Status dispatch( + void const *configuration, + ElementA *ptr_A, + ElementB *ptr_B, + ElementC *ptr_C, + ElementC *ptr_D, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr + ) { + + Conv3dConfiguration const &config = + *static_cast(configuration); + + ConvKind const conv_kind = ConvKindMap::kId; + + if (kProvider == Provider::kReferenceHost) { + cutlass::reference::host::Conv3d< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC , + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp + >( + kConvolutionalOperator, + config.problem_size, + {ptr_A, config.layout_a(conv_kind)}, + {ptr_B, config.layout_b(conv_kind)}, + {ptr_C, config.layout_c(conv_kind)}, + {ptr_D, config.layout_c(conv_kind)}, + alpha, + beta + ); + + return Status::kSuccess; + } + else if (kProvider == Provider::kReferenceDevice) { + return cutlass::reference::device::Conv3d< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp + >( + kConvolutionalOperator, + config.problem_size, + {ptr_A, config.layout_a(conv_kind)}, + {ptr_B, config.layout_b(conv_kind)}, + {ptr_C, config.layout_c(conv_kind)}, + {ptr_D, config.layout_c(conv_kind)}, + alpha, + beta, + stream + ); + } + return Status::kErrorNotSupported; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + Provider Provider_, + conv::Operator ConvolutionalOperator, + int ConvDim, + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename LayoutB_, + typename ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add 
+> +class ConvReferenceOperation : public Operation { +public: + static Provider const kProvider = Provider_; + static conv::Operator const kConvolutionalOperator = ConvolutionalOperator; + static int const kConvDim = ConvDim; + + using ElementA = ElementA_; + using LayoutA = LayoutA_; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using ElementCompute = ElementCompute_; + using ElementAccumulator = ElementAccumulator_; + using ConvertOp = ConvertOp_; + using InnerProductOp = InnerProductOp_; + +protected: + + /// Storage for the name string + std::string name_; + + /// + ConvDescription description_; + +public: + + /// Constructor + ConvReferenceOperation() { + + // Basic information + description_.provider = kProvider; + description_.kind = (kConvDim == 2 ? OperationKind::kConv2d : OperationKind::kConv3d); + description_.conv_kind = ConvKindMap::kId; + description_.conv_dim = kConvDim; + + // Tensor description + description_.A = make_TensorDescription(); + description_.B = make_TensorDescription(); + description_.C = make_TensorDescription(); + + // Epilogue compute and accumulator type description + description_.element_epilogue = NumericTypeMap::kId; + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + // Iterator algorithm for convolution reference + description_.iterator_algorithm = IteratorAlgorithmID::kNone; + + // Compute capability for convolution reference + description_.tile_description.minimum_compute_capability = + (kProvider == Provider::kReferenceDevice ? 
50 : 0); + + description_.tile_description.maximum_compute_capability = 1024; + + // Procedural name + std::stringstream ss; + + ss << "conv" << kConvDim << "d_" << to_string(description_.conv_kind) + << "_reference_" << to_string(description_.provider) + << "_" << to_string(description_.A.element) << to_string(description_.A.layout) + << "_" << to_string(description_.B.element) << to_string(description_.B.layout) + << "_" << to_string(description_.C.element) << to_string(description_.C.layout) + << "_" << to_string(description_.tile_description.math_instruction.element_accumulator); + + name_ = ss.str(); + + description_.name = name_.c_str(); + + // Epilogue compute and accumulator type description + description_.element_epilogue = NumericTypeMap::kId; + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + } + + /// Returns the description of the GEMM operation + virtual OperationDescription const & description() const { + return description_; + } + + virtual Status can_implement( + void const *configuration, + void const *arguments) const { + + return Status::kSuccess; + } + + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + switch (kConvDim) { + case 2: + return sizeof(Conv2dConfiguration); + case 3: + return sizeof(Conv3dConfiguration); + default: + break; + } + + return 0; + } + + virtual uint64_t get_device_workspace_size( + void const *configuration, + void const *arguments = nullptr) const { + + return 0; + } + + virtual Status initialize( + void const *configuration, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration)); + + return Status::kSuccess; + } + + virtual Status run( + void const *arguments, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + ConvArguments const &args = 
*static_cast(arguments); + + ElementCompute alpha; + ElementCompute beta; + + alpha = *static_cast(args.alpha); + beta = *static_cast(args.beta); + + // TODO - respect pointer mode + + // Invoke 2D or 3D convolution + return detail::ConvReferenceDispatcher< + kProvider, + kConvolutionalOperator, + kConvDim, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp + >::dispatch( + host_workspace, + static_cast(const_cast(args.A)), + static_cast(const_cast(args.B)), + static_cast(const_cast(args.C)), + static_cast(args.D), + alpha, + beta, + stream + ); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Constructs Fprop reference operators. +template < + int kConvDim, + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename LayoutB_, + typename ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_conv_fprop(Manifest &manifest) { + + manifest.append(new ConvReferenceOperation< + Provider::kReferenceHost, + conv::Operator::kFprop, + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >); + + manifest.append(new ConvReferenceOperation< + Provider::kReferenceDevice, + conv::Operator::kFprop, + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >); +} + +/// Constructs Dgrad and Wgrad reference operators. 
+template < + int kConvDim, + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename LayoutB_, + typename ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_conv_backwards(Manifest &manifest) { + + manifest.append(new ConvReferenceOperation< + Provider::kReferenceHost, + conv::Operator::kDgrad, + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >); + + manifest.append(new ConvReferenceOperation< + Provider::kReferenceDevice, + conv::Operator::kDgrad, + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >); + + manifest.append(new ConvReferenceOperation< + Provider::kReferenceHost, + conv::Operator::kWgrad, + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >); + + manifest.append(new ConvReferenceOperation< + Provider::kReferenceDevice, + conv::Operator::kWgrad, + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >); +} + +/// Six operators for the price of one. 
+template < + int kConvDim, + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename LayoutB_, + typename ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_conv_all(Manifest &manifest) { + + make_conv_fprop< + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_conv_backwards< + kConvDim, + ElementA_, LayoutA_, + ElementB_, LayoutB_, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ConvertOp_, + InnerProductOp_ + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e4m3a_e4m3out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e4m3a_e4m3out.cu new file mode 100644 index 0000000000000000000000000000000000000000..0e3985d2af336f658a51f8e1436e582dd0daf3be --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e4m3a_e4m3out.cu @@ -0,0 +1,120 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with FP8 E4M3 output +void initialize_gemm_reference_operations_e4m3a_e4m3out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + float_e4m3_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + 
float_e4m3_t // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e4m3a_e5m2out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e4m3a_e5m2out.cu new file mode 100644 index 0000000000000000000000000000000000000000..42f47e22aa02147127beca4334bcd629cea82ef3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e4m3a_e5m2out.cu @@ -0,0 +1,111 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with FP8 E5M2 output +void initialize_gemm_reference_operations_e4m3a_e5m2out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + 
float_e4m3_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e5m2a_e4m3out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e5m2a_e4m3out.cu new file mode 100644 index 0000000000000000000000000000000000000000..97de2bc2bc3c5a05f8336e577fe36fa3d56ccb4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e5m2a_e4m3out.cu @@ -0,0 +1,111 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with FP8 E4M3 output +void initialize_gemm_reference_operations_e5m2a_e4m3out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e4m3_t // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e5m2a_e5m2out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e5m2a_e5m2out.cu new file mode 100644 index 0000000000000000000000000000000000000000..ee5e561e6fd84df4a77bc1578db1f17d77c54240 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_e5m2a_e5m2out.cu @@ -0,0 +1,111 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with FP8 E5M2 output +void initialize_gemm_reference_operations_e5m2a_e5m2out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + bfloat16_t, // 
ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float_e5m2_t // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp32out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp32out.cu new file mode 100644 index 0000000000000000000000000000000000000000..9c12eef6ad6b6f20c7d8e2dd8aec70e0d27a5812 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp32out.cu @@ -0,0 +1,112 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_gemm_reference_operations_fp32out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float, // ElementA + float, // ElementB + float, // ElementC + float, // ElementScalar + float // ElementAccumulator + >(manifest); + + make_gemm_real_canonical_layouts< + tfloat32_t, + tfloat32_t, + float, + float, + float + >(manifest); + + make_gemm_real_canonical_layouts< + tfloat32_t, + tfloat32_t, + tfloat32_t, + float, + float + >(manifest); + + make_gemm_real_canonical_layouts< + half_t, + half_t, + float, + float, + float + >(manifest); + + make_gemm_real_canonical_layouts< + half_t, + half_t, + half_t, + float, + float + >(manifest); + + make_gemm_real_canonical_layouts< + bfloat16_t, + bfloat16_t, + float, + float, + float + >(manifest); + + make_gemm_real_canonical_layouts< + bfloat16_t, + bfloat16_t, + bfloat16_t, + float, + float + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_bf16out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_bf16out.cu new file mode 100644 index 0000000000000000000000000000000000000000..e3b1d81623512254ebea9f7f89c3f297f8133c86 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_bf16out.cu @@ -0,0 +1,93 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with BF16 output +void initialize_gemm_reference_operations_fp8in_bf16out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + bfloat16_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + bfloat16_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + bfloat16_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + bfloat16_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + bfloat16_t // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_fp16out.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_fp16out.cu new file mode 100644 index 0000000000000000000000000000000000000000..e0534966306084434bdd7fb7e4655b80a2e555cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_fp16out.cu @@ -0,0 +1,93 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with FP16 output +void initialize_gemm_reference_operations_fp8in_fp16out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + half_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float , // ElementAccumulator + half_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + half_t, // ElementC + float, // ElementScalar + float, // ElementAccumulator + half_t // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + half_t, // ElementC + float, // ElementScalar 
+ float, // ElementAccumulator + half_t // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_fp32out.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_fp32out.cu new file mode 100644 index 0000000000000000000000000000000000000000..acfdf0c3669a833d800b8d8e5ff5f61a2922f025 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp8in_fp32out.cu @@ -0,0 +1,93 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations for FP8. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// FP8 GEMMs with FP32 output +void initialize_gemm_reference_operations_fp8in_fp32out(Manifest &manifest) { + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e4m3_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e4m3_t, // ElementA + float_e5m2_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e4m3_t, // ElementB + 
float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float // ElementD + >(manifest); + + make_gemm_real_canonical_layouts< + float_e5m2_t, // ElementA + float_e5m2_t, // ElementB + float, // ElementC + float, // ElementScalar + float, // ElementAccumulator + float // ElementD + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp_other.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp_other.cu new file mode 100644 index 0000000000000000000000000000000000000000..5f8a1e30b5344511e45c0045e89ed76554c4e00a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_fp_other.cu @@ -0,0 +1,88 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_gemm_reference_operations_fp_other(Manifest &manifest) { + make_gemm_real_canonical_layouts< + half_t, + half_t, + half_t, + half_t, + half_t + >(manifest); + + make_gemm_real_canonical_layouts< + double, + double, + double, + double, + double + >(manifest); + + make_gemm_complex_canonical_layouts< + complex, + complex, + complex, + complex, + complex + >(manifest); + + make_gemm_complex_canonical_layouts< + complex, + complex, + complex, + complex, + complex + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int4.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int4.cu new file mode 100644 index 0000000000000000000000000000000000000000..c4b1d8106395b292a1224e6de63a7084bb489c94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int4.cu @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_gemm_reference_operations_int4(Manifest &manifest) { + make_gemm_interleaved_layouts< + 64, + int4b_t, + int4b_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_interleaved_layouts< + 64, + int4b_t, + int4b_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + int4b_t, + int4b_t, + int4b_t, + float, + int32_t, + int4b_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + uint4b_t, + float, + int32_t, + uint4b_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + int4b_t, + float, + int32_t, + int4b_t, + NumericConverterClamp + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_canonical.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_canonical.cu new file mode 100644 index 
0000000000000000000000000000000000000000..3237776c02412e5a6778fc48ffd50b1e43de9913 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_canonical.cu @@ -0,0 +1,122 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_gemm_reference_operations_int8_canonical(Manifest &manifest) { + make_gemm_real_canonical_layouts< + int8_t, + int8_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_real_canonical_layouts< + int8_t, + int8_t, + int8_t, + float, + int32_t, + int8_t, + NumericConverterClamp + >(manifest); + + make_gemm_real_canonical_layouts< + int8_t, + int8_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_real_canonical_layouts< + uint8_t, + uint8_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_real_canonical_layouts< + uint8_t, + uint8_t, + int8_t, + float, + int32_t, + int8_t, + NumericConverterClamp + >(manifest); + + make_gemm_real_canonical_layouts< + uint8_t, + uint8_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_real_canonical_layouts< + int8_t, + int8_t, + int8_t, + int32_t, + int32_t, + int8_t, + NumericConverterClamp + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_interleaved_32.cu 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_interleaved_32.cu new file mode 100644 index 0000000000000000000000000000000000000000..814c003418f4641353dfa7242a42ecfbe931528e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_interleaved_32.cu @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_gemm_reference_operations_int8_interleaved_32(Manifest &manifest) { + make_gemm_interleaved_layouts< + 32, + int8_t, + int8_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_interleaved_layouts< + 32, + int8_t, + int8_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 32, + int8_t, + int8_t, + int8_t, + float, + int32_t, + int8_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 32, + uint8_t, + uint8_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_interleaved_layouts< + 32, + uint8_t, + uint8_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 32, + uint8_t, + uint8_t, + uint8_t, + float, + int32_t, + uint8_t, + NumericConverterClamp + >(manifest); + + 
make_gemm_interleaved_layouts< + 32, + uint8_t, + uint8_t, + int8_t, + float, + int32_t, + int8_t, + NumericConverterClamp + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_interleaved_64.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_interleaved_64.cu new file mode 100644 index 0000000000000000000000000000000000000000..04c7d0e7d21ed8d5f633908512d1d6f388529c5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_int8_interleaved_64.cu @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Instantiates GEMM reference implementations. +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "gemm_reference_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_gemm_reference_operations_int8_interleaved_64(Manifest &manifest) { + make_gemm_interleaved_layouts< + 64, + int4b_t, + int4b_t, + int32_t, + int32_t, + int32_t + >(manifest); + + make_gemm_interleaved_layouts< + 64, + int4b_t, + int4b_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + int4b_t, + int4b_t, + int4b_t, + float, + int32_t, + int4b_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + int32_t, + int32_t, + int32_t + >(manifest); + + 
make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + int32_t, + float, + int32_t, + int32_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + uint4b_t, + float, + int32_t, + uint4b_t, + NumericConverterClamp + >(manifest); + + make_gemm_interleaved_layouts< + 64, + uint4b_t, + uint4b_t, + int4b_t, + float, + int32_t, + int4b_t, + NumericConverterClamp + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_reference_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_reference_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..b300529bd68451611416bc46dd196a51263f7fcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/gemm_reference_operation.h @@ -0,0 +1,542 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines reference operations for GEMM operation kinds in CUTLASS Library +*/ + +#pragma once + +#include +#include +#include + +#include "cutlass/cutlass.h" + +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/util.h" +#include "library_internal.h" + +#include "cutlass/util/reference/host/gemm_complex.h" +#include "cutlass/util/reference/device/gemm_complex.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + Provider Provider_, + typename ElementA_, + typename LayoutA_, + cutlass::ComplexTransform TransformA, + typename ElementB_, + typename LayoutB_, + cutlass::ComplexTransform TransformB, + typename 
ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ElementD_ = ElementC_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +class GemmReferenceOperation : public Operation { +public: + static Provider const kProvider = Provider_; + + using ElementA = ElementA_; + using LayoutA = LayoutA_; + using TensorRefA = TensorRef; + static cutlass::ComplexTransform const kTransformA = TransformA; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + using TensorRefB = TensorRef; + static cutlass::ComplexTransform const kTransformB = TransformB; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using ElementD = ElementD_; + using TensorRefC = TensorRef; + using TensorRefD = TensorRef; + using ElementCompute = ElementCompute_; + using ElementAccumulator = ElementAccumulator_; + using ConvertOp = ConvertOp_; + using InnerProductOp = InnerProductOp_; + +protected: + + /// Storage for the name string + std::string name_; + + /// + GemmDescription description_; + +public: + + /// Constructor + GemmReferenceOperation() { + + // Basic information + description_.provider = kProvider; + description_.kind = OperationKind::kGemm; + description_.gemm_kind = GemmKind::kUniversal; + + // Tensor description + description_.A = make_TensorDescription(); + description_.transform_A = ComplexTransformMap::kId; + description_.B = make_TensorDescription(); + description_.transform_B = ComplexTransformMap::kId; + description_.C = make_TensorDescription(); + description_.D = make_TensorDescription(); + + // Epilogue compute and accumulator type description + description_.element_epilogue = NumericTypeMap::kId; + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + // Compute capability for gemm reference + description_.tile_description.minimum_compute_capability = + (kProvider == Provider::kReferenceDevice ? 
50 : 0); + + description_.tile_description.maximum_compute_capability = 1024; + + // Procedural name + std::stringstream ss; + + ss << "gemm" + << "_reference_" << to_string(description_.provider) + << "_" << to_string(description_.A.element) << to_string(description_.A.layout) + << "_" << to_string(description_.B.element) << to_string(description_.B.layout) + << "_" << to_string(description_.C.element) << to_string(description_.C.layout) + << "_" << to_string(description_.tile_description.math_instruction.element_accumulator); + + name_ = ss.str(); + + description_.name = name_.c_str(); + + // Epilogue compute and accumulator type description + description_.element_epilogue = NumericTypeMap::kId; + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + } + + /// Returns the description of the GEMM operation + virtual OperationDescription const & description() const { + return description_; + } + + virtual Status can_implement( + void const *configuration, + void const *arguments) const { + + return Status::kSuccess; + } + + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(GemmUniversalConfiguration); + } + + virtual uint64_t get_device_workspace_size( + void const *configuration, + void const *arguments = nullptr) const { + + return 0; + } + + virtual Status initialize( + void const *configuration, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration)); + + return Status::kSuccess; + } + + virtual Status run( + void const *arguments, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + GemmUniversalConfiguration const &config = *static_cast(host_workspace); + GemmUniversalArguments const &args = *static_cast(arguments); + + TensorRefA ref_A{static_cast(const_cast(args.A)), 
LayoutA(int(config.lda))}; + TensorRefB ref_B{static_cast(const_cast(args.B)), LayoutB(int(config.ldb))}; + TensorRefC ref_C{static_cast(const_cast(args.C)), LayoutC(int(config.ldc))}; + TensorRefD ref_D{static_cast(args.D), LayoutC(int(config.ldd))}; + + if (kProvider == Provider::kReferenceHost) { + + cutlass::reference::host::GemmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ElementD, + ConvertOp, + InnerProductOp + >( + config.problem_size, + *static_cast(args.alpha), + ref_A, + kTransformA, + ref_B, + kTransformB, + *static_cast(args.beta), + ref_C, + ref_D, + ElementAccumulator(), + ((config.mode == library::GemmUniversalMode::kBatched) ? config.batch_count : 1), + args.batch_stride_A, + args.batch_stride_B, + args.batch_stride_C, + args.batch_stride_D + ); + + return Status::kSuccess; + } + else if (kProvider == Provider::kReferenceDevice) { + + cutlass::reference::device::GemmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ElementD, + ConvertOp, + InnerProductOp + >( + config.problem_size, + *static_cast(args.alpha), + ref_A, + kTransformA, + ref_B, + kTransformB, + *static_cast(args.beta), + ref_C, + ref_D, + ElementAccumulator(), + ((config.mode == library::GemmUniversalMode::kBatched) ? 
config.batch_count : 1), + args.batch_stride_A, + args.batch_stride_B, + args.batch_stride_C, + args.batch_stride_D + ); + + return Status::kSuccess; + } + + return Status::kErrorNotSupported; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + cutlass::ComplexTransform TransformA, + typename ElementB_, + typename LayoutB_, + cutlass::ComplexTransform TransformB, + typename ElementC_, + typename LayoutC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ElementD_ = ElementC_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_gemm(Manifest &manifest) { + + manifest.append(new GemmReferenceOperation< + Provider::kReferenceHost, + ElementA_, LayoutA_, TransformA, + ElementB_, LayoutB_, TransformB, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >); + + manifest.append(new GemmReferenceOperation< + Provider::kReferenceDevice, + ElementA_, LayoutA_, TransformA, + ElementB_, LayoutB_, TransformB, + ElementC_, LayoutC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >); +} + +/// Helper to create NN, NT, TN, and TT GEMM layouts. 
+template < + typename ElementA_, cutlass::ComplexTransform TransformA, + typename ElementB_, cutlass::ComplexTransform TransformB, + typename ElementC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ElementD_ = ElementC_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_gemm_canonical_layouts(Manifest &manifest) { + + // M Major outputs + make_gemm< + ElementA_, cutlass::layout::ColumnMajor, TransformA, + ElementB_, cutlass::layout::ColumnMajor, TransformB, + ElementC_, cutlass::layout::ColumnMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm< + ElementA_, cutlass::layout::ColumnMajor, TransformA, + ElementB_, cutlass::layout::RowMajor, TransformB, + ElementC_, cutlass::layout::ColumnMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm< + ElementA_, cutlass::layout::RowMajor, TransformA, + ElementB_, cutlass::layout::ColumnMajor, TransformB, + ElementC_, cutlass::layout::ColumnMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm< + ElementA_, cutlass::layout::RowMajor, TransformA, + ElementB_, cutlass::layout::RowMajor, TransformB, + ElementC_, cutlass::layout::ColumnMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + // N Major outputs + make_gemm< + ElementA_, cutlass::layout::ColumnMajor, TransformA, + ElementB_, cutlass::layout::ColumnMajor, TransformB, + ElementC_, cutlass::layout::RowMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm< + ElementA_, cutlass::layout::ColumnMajor, TransformA, + ElementB_, cutlass::layout::RowMajor, TransformB, + ElementC_, cutlass::layout::RowMajor, + ElementCompute_, + ElementAccumulator_, + 
ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm< + ElementA_, cutlass::layout::RowMajor, TransformA, + ElementB_, cutlass::layout::ColumnMajor, TransformB, + ElementC_, cutlass::layout::RowMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm< + ElementA_, cutlass::layout::RowMajor, TransformA, + ElementB_, cutlass::layout::RowMajor, TransformB, + ElementC_, cutlass::layout::RowMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); +} + + +/// Helper to create TN and interleaved layouts GEMM layouts. +template < + int InterleaveK, + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ElementD_ = ElementC_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_gemm_interleaved_layouts(Manifest &manifest) { + + make_gemm< + ElementA_, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, + ElementB_, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, + ElementC_, cutlass::layout::ColumnMajor, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + +} + +/// Helper to real-valued GEMM with canonical layouts +template < + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ElementD_ = ElementC_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_gemm_real_canonical_layouts(Manifest &manifest) { + make_gemm_canonical_layouts< + ElementA_, cutlass::ComplexTransform::kNone, + ElementB_, cutlass::ComplexTransform::kNone, + ElementC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); +} + +// Helper to create all complex 
transformation permutations +template < + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementCompute_, + typename ElementAccumulator_ = ElementCompute_, + typename ElementD_ = ElementC_, + typename ConvertOp_ = NumericConverter, + typename InnerProductOp_ = multiply_add +> +void make_gemm_complex_canonical_layouts(Manifest &manifest) { + + make_gemm_canonical_layouts< + ElementA_, cutlass::ComplexTransform::kNone, + ElementB_, cutlass::ComplexTransform::kNone, + ElementC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm_canonical_layouts< + ElementA_, cutlass::ComplexTransform::kConjugate, + ElementB_, cutlass::ComplexTransform::kConjugate, + ElementC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm_canonical_layouts< + ElementA_, cutlass::ComplexTransform::kNone, + ElementB_, cutlass::ComplexTransform::kConjugate, + ElementC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); + + make_gemm_canonical_layouts< + ElementA_, cutlass::ComplexTransform::kConjugate, + ElementB_, cutlass::ComplexTransform::kNone, + ElementC_, + ElementCompute_, + ElementAccumulator_, + ElementD_, + ConvertOp_, + InnerProductOp_ + >(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/initialize_reference_operations.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/initialize_reference_operations.cu new file mode 100644 index 0000000000000000000000000000000000000000..15ce52287d3b73e974f6a3a56d188c6d0d958d63 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/reference/initialize_reference_operations.cu @@ -0,0 +1,93 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief + +*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +// note: init methods for the same op-class may be split into multiple to parallelize compilation +void initialize_gemm_reference_operations_int4(Manifest &manifest); +void initialize_gemm_reference_operations_int8_interleaved_32(Manifest &manifest); +void initialize_gemm_reference_operations_int8_interleaved_64(Manifest &manifest); +void initialize_gemm_reference_operations_int8_canonical(Manifest &manifest); +void initialize_gemm_reference_operations_e4m3a_e4m3out(Manifest &manifest); +void initialize_gemm_reference_operations_e5m2a_e4m3out(Manifest &manifest); +void initialize_gemm_reference_operations_e4m3a_e5m2out(Manifest &manifest); +void initialize_gemm_reference_operations_e5m2a_e5m2out(Manifest &manifest); +void initialize_gemm_reference_operations_fp8in_fp16out(Manifest &manifest); +void initialize_gemm_reference_operations_fp8in_bf16out(Manifest &manifest); +void initialize_gemm_reference_operations_fp8in_fp32out(Manifest &manifest); +void initialize_gemm_reference_operations_fp32out(Manifest &manifest); +void initialize_gemm_reference_operations_fp_other(Manifest &manifest); + +void initialize_conv2d_reference_operations(Manifest &manifest); +void initialize_conv3d_reference_operations(Manifest &manifest); + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_reference_operations(Manifest &manifest) { + initialize_conv2d_reference_operations(manifest); + initialize_conv3d_reference_operations(manifest); + + initialize_gemm_reference_operations_int4(manifest); + + 
initialize_gemm_reference_operations_int8_interleaved_32(manifest); + initialize_gemm_reference_operations_int8_interleaved_64(manifest); + initialize_gemm_reference_operations_int8_canonical(manifest); + + initialize_gemm_reference_operations_e4m3a_e4m3out(manifest); + initialize_gemm_reference_operations_e5m2a_e4m3out(manifest); + initialize_gemm_reference_operations_e4m3a_e5m2out(manifest); + initialize_gemm_reference_operations_e5m2a_e5m2out(manifest); + initialize_gemm_reference_operations_fp8in_fp16out(manifest); + initialize_gemm_reference_operations_fp8in_bf16out(manifest); + initialize_gemm_reference_operations_fp8in_fp32out(manifest); + + initialize_gemm_reference_operations_fp32out(manifest); + initialize_gemm_reference_operations_fp_other(manifest); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/singleton.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/singleton.cu new file mode 100644 index 0000000000000000000000000000000000000000..23154484d789a9c7d3fda5c6e966b93c282a064d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/singleton.cu @@ -0,0 +1,62 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#include +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/operation_table.h" +#include "cutlass/library/singleton.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Singleton::Singleton() { + + manifest.initialize(); + + operation_table.append(manifest); +} + +Singleton const & Singleton::get() { + static Singleton instance; + return instance; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/symm_operation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/symm_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..59bb5214869e962343cb826ec9cb64d8600c16d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/symm_operation.h @@ -0,0 +1,379 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all Symm operation kinds (Symm, Hemm) + in CUTLASS Library. 
+ + +*/ + +#pragma once +#include +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/symm.h" +#include "cutlass/gemm/kernel/default_symm_universal.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" +#include "cutlass/core_io.h" +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class SymmOperationBase : public Operation { +public: + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + static BlasMode const kBlasMode = Operator::kBlasMode; + static SideMode const kSideModeA = Operator::kSideModeA; + static FillMode const kFillModeA = Operator::kFillModeA; + + using OperatorArguments = typename Operator::Arguments; + +protected: + + /// + SymmDescription description_; + +public: + + /// Constructor + SymmOperationBase(char const *name = "unknown_symm") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.symm_kind = SymmKind::kUniversal; + description_.side_mode = kSideModeA; + description_.fill_mode = kFillModeA; + description_.blas_mode = kBlasMode; + + description_.kind = OperationKind::kSymm; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = 
make_Coord( + Operator::SymmKernel::WarpCount::kM, + Operator::SymmKernel::WarpCount::kN, + Operator::SymmKernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(Operator::kAlignmentA); + description_.B = make_TensorDescription(Operator::kAlignmentB); + description_.C = make_TensorDescription(Operator::kAlignmentC); + description_.element_epilogue = NumericTypeMap::kId; + + description_.split_k_mode = SplitKMode::kNone; + } + + /// Returns the description of the SYMM operation + virtual OperationDescription const & description() const { + return description_; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class SymmOperation : public SymmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + static BlasMode const kBlasMode = Operator::kBlasMode; + static SideMode const kSideModeA = Operator::kSideModeA; + static FillMode const kFillModeA 
= Operator::kFillModeA; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + SymmOperation(char const *name = "unknown_symm"): + SymmOperationBase(name) { + + this->description_.symm_kind = SymmKind::kUniversal; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + SymmConfiguration const *configuration) { + + //operator_args.mode = configuration->mode; + + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + operator_args.lda = int(configuration->lda); + operator_args.ldb = int(configuration->ldb); + operator_args.ldc = int(configuration->ldc); + operator_args.ldd = int(configuration->ldd); + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + SymmArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A = arguments->A; + operator_args.ptr_B = arguments->B; + operator_args.ptr_C = arguments->C; + operator_args.ptr_D = arguments->D; + + operator_args.batch_stride_A = arguments->batch_stride_A; + operator_args.batch_stride_B = arguments->batch_stride_B; + operator_args.batch_stride_C = arguments->batch_stride_C; + operator_args.batch_stride_D = arguments->batch_stride_D; + + return 
Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + SymmConfiguration const *configuration = + static_cast(configuration_ptr); + + SymmArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + //std::cout << "initialize() library::SymmOperation" << std::endl; + //print_operator_args(args); + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + 
Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + bool need_swapped_matrices = (kSideModeA == SideMode::kLeft && + std::is_same::value) || + (kSideModeA == SideMode::kRight && + std::is_same::value); + if (need_swapped_matrices) { + status = op->update(args.swapped_matrices(), device_workspace); + } else { + status = op->update(args, device_workspace); + } + + if (status != Status::kSuccess) { + return status; + } + + //std::cout << "run() library::SymmOperation" << std::endl; + //print_operator_args(args); + status = op->run(stream); + + return status; + } + + /// Call print_operator_args from the Conv2dOperation::initialize() + // to dump arguments passed on to cutlass operator for debugging + void print_operator_args(OperatorArguments &operator_args) const { + std::cout << "SymmOperation::OperatorArguments" << std::endl + << " problem_size:" << std::endl + << operator_args.problem_size << std::endl + << " epilogue (alpha, beta): " + << operator_args.epilogue.alpha << ", " + << operator_args.epilogue.beta << std::endl + << " ref_A (ptr, {stride}): " + << operator_args.ptr_A << ", {" + << operator_args.lda << "}" << std::endl + << " ref_B (ptr, {stride}): " + << operator_args.ptr_B << ", {" + << operator_args.ldb << "}" << std::endl + << " ref_C (ptr, {stride}): " + << operator_args.ptr_C << ", {" + << operator_args.ldc << "}" << std::endl + << " ref_D (ptr, {stride}): " + << operator_args.ptr_D << ", {" + << operator_args.ldd << "}" << std::endl; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/trmm_operation.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/trmm_operation.h new file mode 100644 index 0000000000000000000000000000000000000000..55f4fa60b782d6b6c53ff91e178ecc063072bdea --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/trmm_operation.h @@ -0,0 +1,346 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines operations for all TRMM operation kinds in CUTLASS Library. + + +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/device/trmm.h" +#include "cutlass/gemm/kernel/default_trmm_universal.h" +#include "cutlass/gemm/kernel/trmm_universal.h" + +#include "cutlass/library/library.h" +#include "library_internal.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class TrmmOperationBase : public Operation { +public: + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + static SideMode const kSideMode = Operator::kSideMode; + static FillMode const kFillMode = Operator::kFillMode; + static DiagType const kDiagType = Operator::kDiagType; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename 
Operator::Arguments; + +protected: + + /// + TrmmDescription description_; + +public: + + /// Constructor + TrmmOperationBase(char const *name = "unknown_trmm") { + + description_.name = name; + description_.provider = Provider::kCUTLASS; + description_.kind = OperationKind::kTrmm; + description_.trmm_kind = TrmmKind::kUniversal; + description_.side_mode = kSideMode; + description_.fill_mode = kFillMode; + description_.diag_type = kDiagType; + + description_.tile_description.threadblock_shape = make_Coord( + Operator::ThreadblockShape::kM, + Operator::ThreadblockShape::kN, + Operator::ThreadblockShape::kK); + + description_.tile_description.threadblock_stages = Operator::kStages; + + description_.tile_description.warp_count = make_Coord( + Operator::TrmmKernel::WarpCount::kM, + Operator::TrmmKernel::WarpCount::kN, + Operator::TrmmKernel::WarpCount::kK); + + description_.tile_description.math_instruction.instruction_shape = make_Coord( + Operator::InstructionShape::kM, + Operator::InstructionShape::kN, + Operator::InstructionShape::kK); + + description_.tile_description.math_instruction.element_accumulator = + NumericTypeMap::kId; + + description_.tile_description.math_instruction.opcode_class = + OpcodeClassMap::kId; + + description_.tile_description.math_instruction.math_operation = + MathOperationMap::kId; + + description_.tile_description.minimum_compute_capability = + ArchMap::kMin; + + description_.tile_description.maximum_compute_capability = + ArchMap::kMax; + + description_.A = make_TensorDescription(Operator::kAlignmentA); + description_.B = make_TensorDescription(Operator::kAlignmentB); + description_.D = make_TensorDescription(Operator::kAlignmentC); + description_.element_epilogue = NumericTypeMap::kId; + + description_.split_k_mode = SplitKMode::kNone; + description_.transform_A = ComplexTransformMap::kId; + } + + /// Returns the description of the TRMM operation + virtual OperationDescription const & description() const { + return description_; + } 
+}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class TrmmOperation : public TrmmOperationBase { +public: + + using Operator = Operator_; + using ElementA = typename Operator::ElementA; + using LayoutA = typename Operator::LayoutA; + static SideMode const kSideMode = Operator::kSideMode; + static FillMode const kFillMode = Operator::kFillMode; + static DiagType const kDiagType = Operator::kDiagType; + using ElementB = typename Operator::ElementB; + using LayoutB = typename Operator::LayoutB; + using ElementC = typename Operator::ElementC; + using LayoutC = typename Operator::LayoutC; + using ElementAccumulator = typename Operator::ElementAccumulator; + using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; + + using OperatorArguments = typename Operator::Arguments; + +public: + + /// Constructor + TrmmOperation(char const *name = "unknown_trmm"): + TrmmOperationBase(name) { + + this->description_.trmm_kind = TrmmKind::kUniversal; + } + +protected: + + /// Constructs the arguments structure given the configuration and arguments + static Status construct_arguments_( + OperatorArguments &operator_args, + TrmmConfiguration const *configuration) { + + //operator_args.mode = configuration->mode; + + operator_args.problem_size = configuration->problem_size; + operator_args.batch_count = configuration->batch_count; + + operator_args.lda = int(configuration->lda); + operator_args.ldb = int(configuration->ldb); + operator_args.ldd = int(configuration->ldd); + + return Status::kSuccess; + } + + /// Constructs the arguments structure given the configuration and arguments + static Status update_arguments_( + OperatorArguments &operator_args, + TrmmArguments const *arguments) { + + if (arguments->pointer_mode == ScalarPointerMode::kHost) { + typename Operator::EpilogueOutputOp::Params params( + *static_cast(arguments->alpha), + *static_cast(arguments->beta) + ); + operator_args.epilogue 
= params; + } + else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ + typename Operator::EpilogueOutputOp::Params params( + static_cast(arguments->alpha), + static_cast(arguments->beta) + ); + operator_args.epilogue = params; + } + else { + return Status::kErrorInvalidProblem; + } + + // update arguments + operator_args.ptr_A = arguments->A; + operator_args.ptr_B = arguments->B; + operator_args.batch_stride_A = arguments->batch_stride_A; + operator_args.batch_stride_B = arguments->batch_stride_B; + operator_args.ptr_D = arguments->D; + operator_args.batch_stride_D = arguments->batch_stride_D; + + return Status::kSuccess; + } + +public: + + /// Returns success if the operation can proceed + virtual Status can_implement( + void const *configuration_ptr, + void const *arguments_ptr) const { + + TrmmConfiguration const *configuration = + static_cast(configuration_ptr); + + TrmmArguments const *arguments = + static_cast(arguments_ptr); + + OperatorArguments args; + + Status status = construct_arguments_(args, configuration); + + if (status != Status::kSuccess) { + return status; + } + + status = update_arguments_(args, arguments); + + if (status != Status::kSuccess) { + return status; + } + + return Operator::can_implement(args); + } + + /// Gets the host-side workspace + virtual uint64_t get_host_workspace_size( + void const *configuration) const { + + return sizeof(Operator); + } + + /// Gets the device-side workspace + virtual uint64_t get_device_workspace_size( + void const *configuration_ptr, + void const *arguments_ptr = nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return 0; + } + + uint64_t size = Operator::get_workspace_size(args); + + return size; + } + + /// Initializes the workspace + virtual Status initialize( + void const *configuration_ptr, + void *host_workspace, + void *device_workspace, + cudaStream_t stream = 
nullptr) const { + + OperatorArguments args; + + Status status = construct_arguments_( + args, + static_cast(configuration_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = new (host_workspace) Operator; + + status = op->initialize(args, device_workspace, stream); + + return status; + } + + /// Runs the kernel + virtual Status run( + void const *arguments_ptr, + void *host_workspace, + void *device_workspace = nullptr, + cudaStream_t stream = nullptr) const { + + OperatorArguments args; + + Status status = update_arguments_( + args, + static_cast(arguments_ptr)); + + if (status != Status::kSuccess) { + return status; + } + + Operator *op = static_cast(host_workspace); + + bool need_swapped_matrices = (kSideMode == SideMode::kLeft && + std::is_same::value) || + (kSideMode == SideMode::kRight && + std::is_same::value); + if (need_swapped_matrices) { + status = op->update(args.swapped_matrices(), device_workspace); + } else { + status = op->update(args, device_workspace); + } + + if (status != Status::kSuccess) { + return status; + } + + status = op->run(stream); + + return status; + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/util.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/util.cu new file mode 100644 index 0000000000000000000000000000000000000000..f734fb8f66907db4884f43f562f56c89e81dd8bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/library/src/util.cu @@ -0,0 +1,1707 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#include +#include +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/complex.h" +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" + +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" + +namespace cutlass { +namespace library { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + Provider enumerant; +} +Provider_enumerants[] = { + {"none", "None", Provider::kNone}, + {"cutlass", "CUTLASS", Provider::kCUTLASS}, + {"host", "reference_host", Provider::kReferenceHost}, + {"device", "reference_device", Provider::kReferenceDevice}, + {"cublas", "cuBLAS", Provider::kCUBLAS}, + {"cudnn", "cuDNN", Provider::kCUDNN}, +}; + +/// Converts a Provider enumerant to a string +char const *to_string(Provider provider, bool pretty) { + + for (auto const & possible : Provider_enumerants) { + if (provider == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Parses a Provider enumerant from a string +template <> +Provider from_string(std::string const &str) { + + for (auto const & possible : Provider_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return Provider::kInvalid; +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + GemmKind enumerant; +} +GemmKind_enumerants[] = { + {"gemm", "", GemmKind::kGemm}, + {"spgemm", "", GemmKind::kSparse}, + {"universal", "", GemmKind::kUniversal}, + {"planar_complex", "", GemmKind::kPlanarComplex}, + {"planar_complex_array", "", GemmKind::kPlanarComplexArray}, + {"grouped", "", GemmKind::kGrouped}, +}; + +/// Converts a GemmKind enumerant to a string +char const *to_string(GemmKind type, bool pretty) { + + for (auto const & possible : GemmKind_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + RankKKind enumerant; +} +RankKKind_enumerants[] = { + {"universal", "", RankKKind::kUniversal}, +}; + +/// Converts a SyrkKind enumerant to a string +char const *to_string(RankKKind type, bool pretty) { + + for (auto const & possible :RankKKind_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + TrmmKind enumerant; +} +TrmmKind_enumerants[] = { + {"universal", "", TrmmKind::kUniversal}, +}; + +/// Converts a TrmmKind enumerant to a string +char const *to_string(TrmmKind type, bool pretty) { + + for (auto const & possible :TrmmKind_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + SymmKind enumerant; +} +SymmKind_enumerants[] = { + {"universal", "", SymmKind::kUniversal}, +}; + +/// Converts a SymmKind enumerant to a string +char const *to_string(SymmKind type, bool pretty) { + + for (auto const & possible :SymmKind_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + SideMode enumerant; +} +SideMode_enumerants[] = { + {"left", "Left", SideMode::kLeft}, + {"right", "Right", SideMode::kRight} +}; + +/// Converts a SideMode enumerant to a string +char const *to_string(SideMode type, bool pretty) { + + for (auto const & possible :SideMode_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + FillMode enumerant; +} +FillMode_enumerants[] = { + {"lower", "Lower", FillMode::kLower}, + {"upper", "Upper", FillMode::kUpper} +}; + +/// Converts a FillMode enumerant to a string +char const *to_string(FillMode type, bool pretty) { + + for (auto const & possible :FillMode_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + BlasMode enumerant; +} +BlasMode_enumerants[] = { + {"symmetric", "Symmetric", BlasMode::kSymmetric}, + {"hermitian", "Hermitian", BlasMode::kHermitian} +}; + +/// Converts a BlasMode enumerant to a string +char const *to_string(BlasMode type, bool pretty) { + + for (auto const & possible :BlasMode_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + DiagType enumerant; +} +DiagType_enumerants[] = { + {"nonunit", "NonUnit", DiagType::kNonUnit}, + {"unit", "Unit", DiagType::kUnit} +}; + +/// Converts a DiagType enumerant to a string +char const *to_string(DiagType type, bool pretty) { + + for (auto const & possible :DiagType_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + OperationKind enumerant; +} +OperationKind_enumerants[] = { + {"eq_gemm", "EqGemm", OperationKind::kEqGemm}, + {"gemm", "Gemm", OperationKind::kGemm}, + {"rank_k", "RankK", OperationKind::kRankK}, + {"rank_2k", "Rank2K", OperationKind::kRank2K}, + {"trmm", "Trmm", OperationKind::kTrmm}, + {"symm", "Symm", OperationKind::kSymm}, + {"conv2d", "Conv2d", OperationKind::kConv2d}, + {"conv3d", "Conv3d", OperationKind::kConv3d}, + {"spgemm", "SparseGemm", OperationKind::kSparseGemm}, +}; + +/// Converts a Status enumerant to a string +char const *to_string(OperationKind enumerant, bool pretty) { + + for (auto const & possible : OperationKind_enumerants) { + if (enumerant == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Converts a Status enumerant from a string +template <> +OperationKind from_string(std::string const &str) { + + for (auto const & possible : OperationKind_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return OperationKind::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + Status enumerant; +} +Status_enumerants[] = { + {"success", "Success", Status::kSuccess}, + {"misaligned_operand", "Error: misaligned operand", Status::kErrorMisalignedOperand}, + {"invalid_problem", "Error: invalid problem", Status::kErrorInvalidProblem}, + {"not_supported", "Error: not supported", Status::kErrorNotSupported}, + {"internal", "Error: internal", Status::kErrorInternal} +}; + +/// Converts a Status enumerant to a string +char const *to_string(Status 
status, bool pretty) { + + for (auto const & possible : Status_enumerants) { + if (status == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Converts a Status enumerant from a string +template <> +Status from_string(std::string const &str) { + + for (auto const & possible : Status_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return Status::kInvalid; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + NumericTypeID enumerant; +} +NumericTypeID_enumerants[] = { + {"unknown", "", NumericTypeID::kUnknown}, + {"void", "Void", NumericTypeID::kVoid}, + {"b1", "B1", NumericTypeID::kB1}, + {"u2", "U2", NumericTypeID::kU2}, + {"u4", "U4", NumericTypeID::kU4}, + {"u8", "U8", NumericTypeID::kU8}, + {"u16", "U16", NumericTypeID::kU16}, + {"u32", "U32", NumericTypeID::kU32}, + {"u64", "U64", NumericTypeID::kU64}, + {"s2", "S2", NumericTypeID::kS2}, + {"s4", "S4", NumericTypeID::kS4}, + {"s8", "S8", NumericTypeID::kS8}, + {"s16", "S16", NumericTypeID::kS16}, + {"s32", "S32", NumericTypeID::kS32}, + {"s64", "S64", NumericTypeID::kS64}, + {"fe4m3", "FE4M3", NumericTypeID::kFE4M3}, + {"fe5m2", "FE5M2", NumericTypeID::kFE5M2}, + {"f16", "F16", NumericTypeID::kF16}, + {"bf16", "BF16", NumericTypeID::kBF16}, + {"f32", "F32", NumericTypeID::kF32}, + {"tf32", "TF32", NumericTypeID::kTF32}, + {"f64", "F64", NumericTypeID::kF64}, + {"cf16", "CF16", NumericTypeID::kCF16}, + {"cbf16", "CBF16", NumericTypeID::kCBF16}, + {"cf32", "CF32", NumericTypeID::kCF32}, + {"ctf32", "CTF32", NumericTypeID::kCTF32}, + {"cf64", "CF64", NumericTypeID::kCF64}, + {"cu2", "CU2", NumericTypeID::kCU2}, + {"cu4", "CU4", NumericTypeID::kCU4}, + {"cu8", "CU8", NumericTypeID::kCU8}, + {"cu16", 
"CU16", NumericTypeID::kCU16}, + {"cu32", "CU32", NumericTypeID::kCU32}, + {"cu64", "CU64", NumericTypeID::kCU64}, + {"cs2", "CS2", NumericTypeID::kCS2}, + {"cs4", "CS4", NumericTypeID::kCS4}, + {"cs8", "CS8", NumericTypeID::kCS8}, + {"cs16", "CS16", NumericTypeID::kCS16}, + {"cs32", "CS32", NumericTypeID::kCS32}, + {"cs64", "CS64", NumericTypeID::kCS64}, + {"*", "", NumericTypeID::kUnknown} +}; + +/// Converts a NumericTypeID enumerant to a string +char const *to_string(NumericTypeID type, bool pretty) { + + for (auto const & possible : NumericTypeID_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Parses a NumericTypeID enumerant from a string +template <> +NumericTypeID from_string(std::string const &str) { + + for (auto const & possible : NumericTypeID_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return NumericTypeID::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns the size of a data type in bits +int sizeof_bits(NumericTypeID type) { + switch (type) { + case NumericTypeID::kFE4M3: return 8; + case NumericTypeID::kFE5M2: return 8; + case NumericTypeID::kF16: return 16; + case NumericTypeID::kBF16: return 16; + case NumericTypeID::kTF32: return 32; + case NumericTypeID::kF32: return 32; + case NumericTypeID::kF64: return 64; + case NumericTypeID::kCF16: return 32; + case NumericTypeID::kCBF16: return 32; + case NumericTypeID::kCF32: return 64; + case NumericTypeID::kCTF32: return 64; + case NumericTypeID::kCF64: return 128; + case NumericTypeID::kS2: return 2; + case NumericTypeID::kS4: return 4; + case NumericTypeID::kS8: return 8; + case NumericTypeID::kS16: return 16; + case NumericTypeID::kS32: return 32; + case NumericTypeID::kS64: return 64; 
+ case NumericTypeID::kU2: return 2; + case NumericTypeID::kU4: return 4; + case NumericTypeID::kU8: return 8; + case NumericTypeID::kU16: return 16; + case NumericTypeID::kU32: return 32; + case NumericTypeID::kU64: return 64; + case NumericTypeID::kB1: return 1; + default: break; + } + return 0; +} + +/// Returns true if the numeric type is a complex data type or false if real-valued. +bool is_complex_type(NumericTypeID type) { + switch (type) { + case NumericTypeID::kCF16: return true; + case NumericTypeID::kCF32: return true; + case NumericTypeID::kCF64: return true; + case NumericTypeID::kCBF16: return true; + case NumericTypeID::kCTF32: return true; + default: break; + } + return false; +} + +/// Returns the field underlying a complex valued type +NumericTypeID get_real_type(NumericTypeID type) { + switch (type) { + case NumericTypeID::kCF16: return NumericTypeID::kF16; + case NumericTypeID::kCF32: return NumericTypeID::kF32; + case NumericTypeID::kCF64: return NumericTypeID::kF64; + case NumericTypeID::kCBF16: return NumericTypeID::kBF16; + case NumericTypeID::kCTF32: return NumericTypeID::kTF32; + default: break; + } + return type; +} + +/// Returns true if numeric type is integer +bool is_integer_type(NumericTypeID type) { + switch (type) { + case NumericTypeID::kS2: return true; + case NumericTypeID::kS4: return true; + case NumericTypeID::kS8: return true; + case NumericTypeID::kS16: return true; + case NumericTypeID::kS32: return true; + case NumericTypeID::kS64: return true; + case NumericTypeID::kU2: return true; + case NumericTypeID::kU4: return true; + case NumericTypeID::kU8: return true; + case NumericTypeID::kU16: return true; + case NumericTypeID::kU32: return true; + case NumericTypeID::kU64: return true; + default: break; + } + return false; +} + +/// Returns true if numeric type is signed +bool is_signed_type(NumericTypeID type) { + switch (type) { + case NumericTypeID::kFE4M3: return true; + case NumericTypeID::kFE5M2: return true; + case 
NumericTypeID::kF16: return true; + case NumericTypeID::kBF16: return true; + case NumericTypeID::kTF32: return true; + case NumericTypeID::kF32: return true; + case NumericTypeID::kF64: return true; + case NumericTypeID::kS2: return true; + case NumericTypeID::kS4: return true; + case NumericTypeID::kS8: return true; + case NumericTypeID::kS16: return true; + case NumericTypeID::kS32: return true; + case NumericTypeID::kS64: return true; + default: break; + } + return false; +} + +/// Returns true if numeric type is a signed integer +bool is_signed_integer(NumericTypeID type) { + return is_integer_type(type) && is_signed_type(type); +} + +/// returns true if numeric type is an unsigned integer +bool is_unsigned_integer(NumericTypeID type) { + return is_integer_type(type) && !is_signed_type(type); +} + +/// Returns true if numeric type is floating-point type +bool is_float_type(NumericTypeID type) { + switch (type) { + case NumericTypeID::kFE4M3: return true; + case NumericTypeID::kFE5M2: return true; + case NumericTypeID::kF16: return true; + case NumericTypeID::kBF16: return true; + case NumericTypeID::kTF32: return true; + case NumericTypeID::kF32: return true; + case NumericTypeID::kF64: return true; + case NumericTypeID::kCF16: return true; + case NumericTypeID::kCBF16: return true; + case NumericTypeID::kCTF32: return true; + case NumericTypeID::kCF32: return true; + case NumericTypeID::kCF64: return true; + default: break; + } + return false; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + LayoutTypeID layout; + char const *alias; +} +layout_aliases[] = { + {LayoutTypeID::kUnknown, "unknown"}, + {LayoutTypeID::kRowMajor, "row"}, + {LayoutTypeID::kRowMajor, "t"}, + {LayoutTypeID::kColumnMajor, "column"}, + {LayoutTypeID::kColumnMajor, "col"}, + {LayoutTypeID::kColumnMajor, "n"}, + + {LayoutTypeID::kColumnMajorInterleavedK2, "nk2"}, + {LayoutTypeID::kRowMajorInterleavedK2, "tk2"}, + + 
{LayoutTypeID::kColumnMajorInterleavedK4, "nk4"}, + {LayoutTypeID::kRowMajorInterleavedK4, "tk4"}, + + {LayoutTypeID::kColumnMajorInterleavedK16, "nk16"}, + {LayoutTypeID::kRowMajorInterleavedK16, "tk16"}, + + {LayoutTypeID::kColumnMajorInterleavedK32, "nk32"}, + {LayoutTypeID::kRowMajorInterleavedK32, "tk32"}, + + {LayoutTypeID::kColumnMajorInterleavedK64, "nk64"}, + {LayoutTypeID::kRowMajorInterleavedK64, "tk64"}, + + {LayoutTypeID::kTensorNCHW, "nchw"}, + {LayoutTypeID::kTensorNCDHW, "ncdhw"}, + {LayoutTypeID::kTensorNHWC, "nhwc"}, + {LayoutTypeID::kTensorNDHWC, "ndhwc"}, + {LayoutTypeID::kTensorNC32HW32, "nc32hw32"}, + {LayoutTypeID::kTensorNC64HW64, "nc64hw64"}, + {LayoutTypeID::kTensorC32RSK32, "c32rsk32"}, + {LayoutTypeID::kTensorC64RSK64, "c64rsk64"}, + + {LayoutTypeID::kUnknown, "*"}, + {LayoutTypeID::kInvalid, nullptr} +}; + +/// Converts a LayoutTypeID enumerant to a string +char const *to_string(LayoutTypeID layout, bool pretty) { + for (auto const & alias : layout_aliases) { + if (alias.layout == layout) { + return alias.alias; + } + } + return pretty ? 
"Invalid" : "invalid"; +} + +/// Parses a LayoutTypeID enumerant from a string +template <> +LayoutTypeID from_string(std::string const &str) { + for (auto const & alias : layout_aliases) { + if (str.compare(alias.alias) == 0) { + return alias.layout; + } + } + return LayoutTypeID::kInvalid; +} + +/// Gets stride rank for the layout_id (static function) +int get_layout_stride_rank(LayoutTypeID layout_id) { + switch (layout_id) { + case LayoutTypeID::kColumnMajor: + return cutlass::layout::ColumnMajor::kStrideRank; + case LayoutTypeID::kRowMajor: + return cutlass::layout::RowMajor::kStrideRank; + case LayoutTypeID::kColumnMajorInterleavedK2: + return cutlass::layout::ColumnMajorInterleaved<2>::kStrideRank; + case LayoutTypeID::kRowMajorInterleavedK2: + return cutlass::layout::RowMajorInterleaved<2>::kStrideRank; + case LayoutTypeID::kColumnMajorInterleavedK4: + return cutlass::layout::ColumnMajorInterleaved<4>::kStrideRank; + case LayoutTypeID::kRowMajorInterleavedK4: + return cutlass::layout::RowMajorInterleaved<4>::kStrideRank; + case LayoutTypeID::kColumnMajorInterleavedK16: + return cutlass::layout::ColumnMajorInterleaved<16>::kStrideRank; + case LayoutTypeID::kRowMajorInterleavedK16: + return cutlass::layout::RowMajorInterleaved<16>::kStrideRank; + case LayoutTypeID::kColumnMajorInterleavedK32: + return cutlass::layout::ColumnMajorInterleaved<32>::kStrideRank; + case LayoutTypeID::kRowMajorInterleavedK32: + return cutlass::layout::RowMajorInterleaved<32>::kStrideRank; + case LayoutTypeID::kColumnMajorInterleavedK64: + return cutlass::layout::ColumnMajorInterleaved<64>::kStrideRank; + case LayoutTypeID::kRowMajorInterleavedK64: + return cutlass::layout::RowMajorInterleaved<64>::kStrideRank; + case LayoutTypeID::kTensorNCHW: + return cutlass::layout::TensorNCHW::kStrideRank; + case LayoutTypeID::kTensorNHWC: + return cutlass::layout::TensorNHWC::kStrideRank; + case LayoutTypeID::kTensorNDHWC: + return cutlass::layout::TensorNDHWC::kStrideRank; + case 
LayoutTypeID::kTensorNC32HW32: + return cutlass::layout::TensorNCxHWx<32>::kStrideRank; + case LayoutTypeID::kTensorNC64HW64: + return cutlass::layout::TensorNCxHWx<64>::kStrideRank; + case LayoutTypeID::kTensorC32RSK32: + return cutlass::layout::TensorCxRSKx<32>::kStrideRank; + case LayoutTypeID::kTensorC64RSK64: + return cutlass::layout::TensorCxRSKx<64>::kStrideRank; + default: + throw std::runtime_error("Unsupported LayoutTypeID in LayoutType::get_stride_rank"); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + OpcodeClassID enumerant; +} +OpcodeClassID_enumerants[] = { + {"simt", "", OpcodeClassID::kSimt}, + {"tensorop", "", OpcodeClassID::kTensorOp}, + {"wmmatensorop", "", OpcodeClassID::kWmmaTensorOp}, + {"wmma", "", OpcodeClassID::kWmmaTensorOp}, +}; + +/// Converts a OpcodeClassID enumerant to a string +char const *to_string(OpcodeClassID type, bool pretty) { + + for (auto const & possible : OpcodeClassID_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Converts a OpcodeClassID enumerant from a string +template <> +OpcodeClassID from_string(std::string const &str) { + + for (auto const & possible : OpcodeClassID_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return OpcodeClassID::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + ComplexTransform enumerant; +} +ComplexTransform_enumerants[] = { + {"n", "none", ComplexTransform::kNone}, + {"c", "conj", ComplexTransform::kConjugate} +}; + +/// Converts a ComplexTransform enumerant to a string +char const *to_string(ComplexTransform type, bool pretty) { + + for (auto const & possible : ComplexTransform_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Converts a ComplexTransform enumerant from a string +template <> +ComplexTransform from_string(std::string const &str) { + + for (auto const & possible : ComplexTransform_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return ComplexTransform::kInvalid; +} + + +static struct { + char const *text; + char const *pretty; + SplitKMode enumerant; +} +SplitKMode_enumerants[] = { + {"serial", "", SplitKMode::kSerial}, + {"parallel", "", SplitKMode::kParallel}, +}; + +/// Converts a SplitKMode enumerant to a string +char const *to_string(SplitKMode type, bool pretty) { + + for (auto const & possible : SplitKMode_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Converts a SplitKMode enumerant from a string +template <> +SplitKMode from_string(std::string const &str) { + + for (auto const & possible : SplitKMode_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return SplitKMode::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +static struct { + char const *text; + char const *pretty; + ConvModeID enumerant; +} +ConvModeID_enumerants[] = { + {"cross", "", ConvModeID::kCrossCorrelation}, + {"conv", "", ConvModeID::kConvolution}, +}; + +/// Converts a ConvModeID enumerant to a string +char const *to_string(ConvModeID type, bool pretty) { + + for (auto const & possible : ConvModeID_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Converts a ConvModeID enumerant from a string +template <> +ConvModeID from_string(std::string const &str) { + + for (auto const & possible : ConvModeID_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return ConvModeID::kInvalid; +} + + +static struct { + char const *text; + char const *pretty; + IteratorAlgorithmID enumerant; +} +IteratorAlgorithmID_enumerants[] = { + {"none", "", IteratorAlgorithmID::kNone}, + {"analytic", "", IteratorAlgorithmID::kAnalytic}, + {"optimized", "", IteratorAlgorithmID::kOptimized}, + {"fixed_channels", "", IteratorAlgorithmID::kFixedChannels}, + {"few_channels", "", IteratorAlgorithmID::kFewChannels}, +}; + +/// Converts a ConvModeID enumerant to a string +char const *to_string(IteratorAlgorithmID type, bool pretty) { + + for (auto const & possible : IteratorAlgorithmID_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return 
possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Converts a ConvModeID enumerant from a string +template <> +IteratorAlgorithmID from_string(std::string const &str) { + + for (auto const & possible : IteratorAlgorithmID_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return IteratorAlgorithmID::kInvalid; +} +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + ConvKind enumerant; +} +ConvKind_enumerants[] = { + {"unknown", "", ConvKind::kUnknown}, + {"fprop", "", ConvKind::kFprop}, + {"dgrad", "", ConvKind::kDgrad}, + {"wgrad", "", ConvKind::kWgrad}, +}; + +/// Converts a ConvKind enumerant to a string +char const *to_string(ConvKind type, bool pretty) { + + for (auto const & possible : ConvKind_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + + +/// Converts a ConvKind enumerant from a string +template <> +ConvKind from_string(std::string const &str) { + + for (auto const & possible : ConvKind_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return ConvKind::kInvalid; +} +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + RasterOrder enumerant; +} +RasterOrder_enumerants[] = { + {"along_n", "", RasterOrder::kAlongN}, + {"along_m", "", RasterOrder::kAlongM}, + {"heuristic", "", RasterOrder::kHeuristic}, +}; + +/// Converts a RasterOrder enumerant to a string +char const *to_string(RasterOrder type, bool pretty) { + + for (auto const & possible : RasterOrder_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + + +/// Converts a RasterOrder enumerant from a string +template <> +RasterOrder from_string(std::string const &str) { + + for (auto const & possible : RasterOrder_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return RasterOrder::kInvalid; +} +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexical cast a string to a byte array. Returns true if cast is successful or false if invalid. 
+bool lexical_cast(std::vector &bytes, NumericTypeID type, std::string const &str) { + int size_bytes = sizeof_bits(type) / 8; + if (!size_bytes) { + return false; + } + + bytes.resize(size_bytes, 0); + + std::stringstream ss; + ss << str; + + switch (type) { + case NumericTypeID::kU8: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kU16: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kU32: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kU64: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS8: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS16: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS32: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS64: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kFE4M3: + { + float tmp; + ss >> tmp; + *reinterpret_cast(bytes.data()) = static_cast(tmp); + } + break; + case NumericTypeID::kFE5M2: + { + float tmp; + ss >> tmp; + *reinterpret_cast(bytes.data()) = static_cast(tmp); + } + break; + case NumericTypeID::kF16: + { + float tmp; + ss >> tmp; + *reinterpret_cast(bytes.data()) = static_cast(tmp); + } + break; + case NumericTypeID::kBF16: + { + float tmp; + ss >> tmp; + *reinterpret_cast(bytes.data()) = static_cast(tmp); + } + break; + case NumericTypeID::kTF32: + { + float tmp; + ss >> tmp; + *reinterpret_cast(bytes.data()) = static_cast(tmp); + } + break; + case NumericTypeID::kF32: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kF64: + { + ss >> *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kCF16: + { + std::complex tmp; + ss >> tmp; + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(std::real(tmp)); + x->imag() = static_cast(std::imag(tmp)); + } + break; + case 
NumericTypeID::kCBF16: + { + std::complex tmp; + ss >> tmp; + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(std::real(tmp)); + x->imag() = static_cast(std::imag(tmp)); + } + break; + case NumericTypeID::kCF32: + { + ss >> *reinterpret_cast*>(bytes.data()); + } + break; + case NumericTypeID::kCTF32: + { + std::complex tmp; + ss >> tmp; + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(std::real(tmp)); + x->imag() = static_cast(std::imag(tmp)); + } + break; + case NumericTypeID::kCF64: + { + ss >> *reinterpret_cast*>(bytes.data()); + } + break; + default: + return false; + } + + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +std::string lexical_cast(int64_t int_value) { + std::stringstream ss; + ss << int_value; + return ss.str(); +} + +/// Lexical cast TO a string FROM a byte array. Returns true if cast is successful or false if invalid. +std::string lexical_cast(std::vector &bytes, NumericTypeID type) { + + int size_bytes = sizeof_bits(type) / 8; + + if (!size_bytes || size_bytes != bytes.size()) { + return ""; + } + + bytes.resize(size_bytes, 0); + + std::stringstream ss; + + switch (type) { + case NumericTypeID::kU8: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kU16: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kU32: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kU64: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS8: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS16: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS32: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kS64: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kFE4M3: + { + float tmp = 
*reinterpret_cast(bytes.data()); + ss << tmp; + } + break; + case NumericTypeID::kFE5M2: + { + float tmp = *reinterpret_cast(bytes.data()); + ss << tmp; + } + break; + case NumericTypeID::kF16: + { + float tmp = *reinterpret_cast(bytes.data()); + ss << tmp; + } + break; + case NumericTypeID::kBF16: + { + float tmp = *reinterpret_cast(bytes.data()); + ss << tmp; + } + break; + case NumericTypeID::kTF32: + { + float tmp = *reinterpret_cast(bytes.data()); + ss << tmp; + } + break; + case NumericTypeID::kF32: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kF64: + { + ss << *reinterpret_cast(bytes.data()); + } + break; + case NumericTypeID::kCF16: + { + cutlass::complex const *x = + reinterpret_cast const *>(bytes.data()); + + ss << float(x->real()); + + if (x->imag() != cutlass::half_t()) { + ss << "+i" << float(x->imag()); + } + } + break; + case NumericTypeID::kCBF16: + { + cutlass::complex const *x = + reinterpret_cast const *>(bytes.data()); + + ss << float(x->real()); + + if (x->imag() != cutlass::bfloat16_t()) { + ss << "+i" << float(x->imag()); + } + } + break; + case NumericTypeID::kCF32: + { + cutlass::complex const * x = reinterpret_cast const *>(bytes.data()); + + ss << x->real(); + + if (x->imag() != float()) { + ss << "+i" << x->imag(); + } + } + break; + case NumericTypeID::kCTF32: + { + cutlass::complex const * x = reinterpret_cast const *>(bytes.data()); + + ss << float(x->real()); + + if (x->imag() != tfloat32_t()) { + ss << "+i" << float(x->imag()); + } + } + break; + case NumericTypeID::kCF64: + { + cutlass::complex const * x = reinterpret_cast const *>(bytes.data()); + + ss << x->real(); + + if (x->imag() != double()) { + ss << "+i" << x->imag(); + } + } + break; + default: + return ""; + } + + return ss.str(); +} + +/// Casts from a signed int64 to the destination type. Returns true if successful. 
+bool cast_from_int64(std::vector &bytes, NumericTypeID type, int64_t src) { + int size_bytes = sizeof_bits(type) / 8; + if (!size_bytes) { + return false; + } + + bytes.resize(size_bytes, 0); + + switch (type) { + case NumericTypeID::kU8: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU16: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU64: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS8: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS16: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS64: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kFE4M3: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kFE5M2: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kF16: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kBF16: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kTF32: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kF32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kF64: + { + *reinterpret_cast(bytes.data()) = double(src); + } + break; + case NumericTypeID::kCF16: + { + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(float(src)); + x->imag() = static_cast(float(0)); + } + break; + case NumericTypeID::kCF32: + { + *reinterpret_cast*>(bytes.data()) = 
cutlass::complex(float(src), float(0)); + } + break; + case NumericTypeID::kCF64: + { + *reinterpret_cast*>(bytes.data()) = cutlass::complex(double(src), double(0)); + } + break; + default: + return false; + } + + return true; + +} + +/// Casts from an unsigned int64 to the destination type. Returns true if successful. +bool cast_from_uint64(std::vector &bytes, NumericTypeID type, uint64_t src) { + int size_bytes = sizeof_bits(type) / 8; + if (!size_bytes) { + return false; + } + + bytes.resize(size_bytes, 0); + + switch (type) { + case NumericTypeID::kU8: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU16: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU64: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS8: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS16: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS64: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kFE4M3: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kFE5M2: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kF16: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kBF16: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kTF32: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kF32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kF64: + { + 
*reinterpret_cast(bytes.data()) = double(src); + } + break; + case NumericTypeID::kCF16: + { + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(float(src)); + x->imag() = static_cast(float(0)); + } + break; + case NumericTypeID::kCF32: + { + *reinterpret_cast*>(bytes.data()) = std::complex(float(src), float(0)); + } + break; + case NumericTypeID::kCF64: + { + *reinterpret_cast*>(bytes.data()) = std::complex(double(src), double(0)); + } + break; + default: + return false; + } + + return true; + +} + +/// Lexical cast a string to a byte array. Returns true if cast is successful or false if invalid. +bool cast_from_double(std::vector &bytes, NumericTypeID type, double src) { + + int size_bytes = sizeof_bits(type) / 8; + if (!size_bytes) { + return false; + } + + bytes.resize(size_bytes, 0); + + switch (type) { + case NumericTypeID::kU8: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU16: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kU64: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS8: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS16: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kS64: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kFE4M3: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kFE5M2: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kF16: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kBF16: + { + 
*reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kTF32: + { + *reinterpret_cast(bytes.data()) = static_cast(float(src)); + } + break; + case NumericTypeID::kF32: + { + *reinterpret_cast(bytes.data()) = static_cast(src); + } + break; + case NumericTypeID::kF64: + { + *reinterpret_cast(bytes.data()) = src; + } + break; + case NumericTypeID::kCF16: + { + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(float(src)); + x->imag() = static_cast(float(0)); + } + break; + case NumericTypeID::kCBF16: + { + cutlass::complex *x = reinterpret_cast *>(bytes.data()); + x->real() = static_cast(bfloat16_t(src)); + x->imag() = static_cast(bfloat16_t(0)); + } + break; + case NumericTypeID::kCF32: + { + *reinterpret_cast*>(bytes.data()) = cutlass::complex(float(src), float()); + } + break; + case NumericTypeID::kCTF32: + { + *reinterpret_cast*>(bytes.data()) = cutlass::complex(tfloat32_t(src), tfloat32_t()); + } + break; + case NumericTypeID::kCF64: + { + *reinterpret_cast*>(bytes.data()) = cutlass::complex(src, double()); + } + break; + default: + return false; + } + + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..16d94db294cc88c56a517945b81fc608ef564c46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/CMakeLists.txt @@ -0,0 +1,125 @@ +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +find_package(Python3 3.5 COMPONENTS Interpreter REQUIRED) + +# +# Sources for CUTLASS Profiler Tool +# +cmake_policy(SET CMP0112 NEW) +set(CUTLASS_TOOLS_PROFILER_SOURCES + src/main.cpp + src/cutlass_profiler.cu + src/options.cu + src/performance_report.cpp + src/enumerated_types.cpp + src/gpu_timer.cpp + src/device_allocation.cu + src/device_context.cu + src/cublas_helpers.cu + src/cudnn_helpers.cpp + src/problem_space.cpp + src/operation_profiler.cu + src/gemm_operation_profiler.cu + src/rank_k_operation_profiler.cu + src/rank_2k_operation_profiler.cu + src/trmm_operation_profiler.cu + src/symm_operation_profiler.cu + src/conv2d_operation_profiler.cu + src/conv3d_operation_profiler.cu + src/sparse_gemm_operation_profiler.cu +) + +# +# Build target +# + +cutlass_add_executable( + cutlass_profiler + ${CUTLASS_TOOLS_PROFILER_SOURCES} +) +add_executable(nvidia::cutlass::profiler ALIAS cutlass_profiler) +set_target_properties(cutlass_profiler PROPERTIES EXPORT_NAME profiler) + +# +# Include paths +# + +target_include_directories( + cutlass_profiler + PRIVATE + ${CMAKE_CURRENT_LIST_DIR}/include + ) + +# +# Library dependencies +# + +target_link_libraries( + cutlass_profiler + PRIVATE + cutlass_lib + cutlass_tools_util_includes + $<$:nvidia::cublas> + $<$:nvidia::cudnn> + cudart + cuda_driver + ) + +install( + TARGETS cutlass_profiler + EXPORT NvidiaCutlass + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + ) + +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_GEMM --operation=Gemm --providers=cutlass --verification-providers=cublas,device --junit-output=test_cutlass_profiler_gemm --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_CONV2D --operation=Conv2d --providers=cutlass --verification-providers=cudnn,device --junit-output=test_cutlass_profiler_conv2d --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_CONV3D --operation=Conv3d --providers=cutlass --verification-providers=cudnn,device,host 
--junit-output=test_cutlass_profiler_conv3d --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_SPGEMM --operation=SparseGemm --providers=cutlass --verification-providers=cublas,device,host --junit-output=test_cutlass_profiler_spgemm --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_RANK_K --operation=RankK --providers=cutlass --verification-providers=cublas --junit-output=test_cutlass_profiler_rank_k --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_RANK_2K --operation=Rank2K --providers=cutlass --verification-providers=cublas --junit-output=test_cutlass_profiler_rank_2k --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_TRMM --operation=Trmm --providers=cutlass --verification-providers=device,host --junit-output=test_cutlass_profiler_trmm --print-kernel-before-running=true) +set(CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_SYMM --operation=Symm --providers=cutlass --verification-providers=cublas,host --junit-output=test_cutlass_profiler_symm --print-kernel-before-running=true) + +cutlass_add_executable_tests( + test_profiler cutlass_profiler + DEPENDEES test_all + TEST_COMMAND_OPTIONS + GEMM + CONV2D + CONV3D + SPGEMM + RANK_K + RANK_2K + TRMM + SYMM + TEST_COMMAND_OPTIONS_PREFIX + CUTLASS_PROFILER_TEST_COMMAND_OPTIONS_ + DISABLE_EXECUTABLE_INSTALL_RULE + ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..1b0345df15d04404b4421c54aafb3fbc51656c6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h @@ -0,0 +1,495 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines profiling functionality for convolution + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/handle.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/singleton.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" +#include "reduction_operation_profiler.h" +#if CUTLASS_ENABLE_CUDNN +#include "cudnn_helpers.h" +#endif //#if CUTLASS_ENABLE_CUDNN +#include "debug.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class Conv2dOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct Conv2dProblem { + + int64_t n, h, w, c, p, q, k, r, s; + int64_t groups; + int64_t pad_h, pad_w; + int64_t stride_h, stride_w; + int64_t dilation_h, dilation_w; + + std::vector alpha; + std::vector beta; + + library::SplitKMode split_k_mode; + int64_t split_k_slices; + + library::ConvModeID conv_mode; + + library::Provider eq_gemm_provider; + + // convolution with parallel interleaved reduction + // convolution epilogue (alpha, beta) = (1.0, 0.0) + // reduction epilogue (alpha, beta) = (Conv2dProblem::alpha, Conv2dProblem::beta) + std::vector alpha_one; + std::vector beta_zero; + + // + // Methods + // + + /// Total number of bytes loaded + int64_t bytes(library::ConvDescription const &operation_desc) const; + + /// Total number of flops computed + int64_t 
flops(library::ConvDescription const &operation_desc) const; + + void set_default_output_size() { + p = ((h + pad_h - r * dilation_h) / stride_h) + 1; + q = ((w + pad_w - s * dilation_w) / stride_w) + 1; + } + + // Returns equivalent gemm problem size for convolution + cutlass::gemm::GemmCoord eq_gemm_size(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return cutlass::gemm::GemmCoord(int(n * p * q), int(k), int(r * s * c / groups)); + case library::ConvKind::kDgrad: return cutlass::gemm::GemmCoord(int(n * h * w), int(c), int(k * r * s)); + case library::ConvKind::kWgrad: return cutlass::gemm::GemmCoord(int(k), int(r * s * c), int(n * p * q)); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns extent for tensor A + std::vector extent_a(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return {int(n), int(h), int(w), int(c)}; + case library::ConvKind::kDgrad: return {int(n), int(p), int(q), int(k)}; + case library::ConvKind::kWgrad: return {int(n), int(p), int(q), int(k)}; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns extent for tensor B + std::vector extent_b(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return {int(k), int(r), int(s), int(c / groups)}; + case library::ConvKind::kDgrad: return {int(k), int(r), int(s), int(c)}; + case library::ConvKind::kWgrad: return {int(n), int(h), int(w), int(c)}; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns extent for tensor C + std::vector extent_c(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return {int(n), int(p), int(q), int(k)}; + case library::ConvKind::kDgrad: return {int(n), int(h), int(w), int(c)}; + case 
library::ConvKind::kWgrad: return {int(k), int(r), int(s), int(c)}; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns layout for equivalent gemm matrix A + library::LayoutTypeID eq_gemm_layout_a(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return library::LayoutTypeID::kRowMajor; // TN Gemm + case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm + case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; // NT Gemm + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns layout for equivalent gemm matrix B + library::LayoutTypeID eq_gemm_layout_b(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return library::LayoutTypeID::kColumnMajor; // TN Gemm + case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm + case library::ConvKind::kWgrad: return library::LayoutTypeID::kRowMajor; // NT Gemm + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns layout for equivalent gemm matrix C + library::LayoutTypeID eq_gemm_layout_c(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + // Gemm operator assumes column-major output + case library::ConvKind::kFprop: + case library::ConvKind::kDgrad: + case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns leading dimension for equivalent gemm matrix A + int64_t eq_gemm_lda(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); + case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).k(); + case library::ConvKind::kWgrad: return 
eq_gemm_size(conv_kind).m(); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns leading dimension for equivalent gemm matrix B + int64_t eq_gemm_ldb(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); + case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).n(); + case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).n(); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns leading dimension for equivalent gemm matrix C + int64_t eq_gemm_ldc(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: + case library::ConvKind::kDgrad: + case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + }; + + /// Workspace used + struct Conv2dWorkspace { + + /// Conv device allocations + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *reordered_B; + DeviceAllocation *C; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + /// Library configuration and arguments for convolution operator + library::Conv2dConfiguration configuration; + library::ConvArguments arguments; + + /// Number of copies of the problem workspace which are visited sequentially during + /// profiling to avoid camping in the last level cache. 
+ int problem_count; + + /// Buffer used for the cutlass conv2d operations' host workspace + std::vector host_workspace; + + /// Buffer used for the cutlass operations' device workspace + DeviceAllocation device_workspace; + + /// Library configuration and arguments for reduction operator + library::ReductionConfiguration reduction_configuration; + library::ReductionArguments reduction_arguments; + + /// Buffer used for the cutlass reduction operations' host workspace + std::vector reduction_host_workspace; + + /// Host data buffers for host reference operation + /// host buffer for tensor + std::vector host_tensor_a; + + /// host buffer for tensor b + std::vector host_tensor_b; + + /// host buffer for tensor c + std::vector host_tensor_c; + + // + // Methods + // + + Conv2dWorkspace() + : A(nullptr), + B(nullptr), + reordered_B(nullptr), + C(nullptr), + Computed(nullptr), + Reference(nullptr) {} + + // Set stride vector for tensor activations, filters, output + void set_stride_vector(Conv2dProblem const &problem, + library::ConvKind const &conv_kind, + library::LayoutTypeID const &layout_a, + library::LayoutTypeID const &layout_b, + library::LayoutTypeID const &layout_c) { + std::vector stride_activations; + std::vector stride_filters; + std::vector stride_output; + + // Strides for interleaved fprop + if (conv_kind == library::ConvKind::kFprop && + ((layout_a == library::LayoutTypeID::kTensorNC32HW32 && + layout_b == library::LayoutTypeID::kTensorC32RSK32 && + layout_c == library::LayoutTypeID::kTensorNC32HW32) || + (layout_a == library::LayoutTypeID::kTensorNC64HW64 && + layout_b == library::LayoutTypeID::kTensorC64RSK64 && + layout_c == library::LayoutTypeID::kTensorNC64HW64))) { + int interleave = + (layout_a == library::LayoutTypeID::kTensorNC32HW32) ? 
32 : 64; + + stride_activations.push_back(int(problem.w) * interleave); + stride_activations.push_back(int(problem.w) * int(problem.h) * + interleave); + stride_activations.push_back(int(problem.h) * int(problem.w) * + int(problem.c)); + + stride_filters.push_back(int(problem.k) * interleave); + stride_filters.push_back(int(problem.k) * int(problem.s) * interleave); + stride_filters.push_back(int(problem.k) * int(problem.s) * + int(problem.r) * interleave); + + stride_output.push_back(int(problem.q) * interleave); + stride_output.push_back(int(problem.q) * int(problem.p) * interleave); + stride_output.push_back(int(problem.q) * int(problem.p) * + int(problem.k)); + } else { + // Strides for the rest cases + stride_activations.push_back(int(problem.c)); + stride_activations.push_back(int(problem.w) * int(problem.c)); + stride_activations.push_back(int(problem.h) * int(problem.w) * + int(problem.c)); + + stride_filters.push_back(int(problem.c / problem.groups)); + stride_filters.push_back(int(problem.s) * int(problem.c / problem.groups)); + stride_filters.push_back(int(problem.r) * int(problem.s) * + int(problem.c / problem.groups)); + + stride_output.push_back(int(problem.k)); + stride_output.push_back(int(problem.q) * int(problem.k)); + stride_output.push_back(int(problem.q) * int(problem.p) * + int(problem.k)); + } + + switch (conv_kind) { + case library::ConvKind::kFprop: + configuration.stride_a = stride_activations; + configuration.stride_b = stride_filters; + configuration.stride_c = stride_output; + + break; + case library::ConvKind::kDgrad: + configuration.stride_a = stride_output; + configuration.stride_b = stride_filters; + configuration.stride_c = stride_activations; + + break; + case library::ConvKind::kWgrad: + configuration.stride_a = stride_output; + configuration.stride_b = stride_activations; + configuration.stride_c = stride_filters; + + break; + default: + throw std::runtime_error( + "Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + }; + 
+protected: + + // + // Data members + // + + /// CONV problem obtained from problem space + Conv2dProblem problem_; + + /// Device memory allocations + Conv2dWorkspace conv_workspace_; + + /// CUTLASS parallel reduction operation to follow this* conv2d operation + library::Operation const *reduction_op_; + +public: + // + // Methods + // + + /// Ctor + Conv2dOperationProfiler(Options const &options); + + /// Destructor + virtual ~Conv2dOperationProfiler(); + + Conv2dProblem const& problem() const { return problem_; } + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + /// Method to profile an initialized CUTLASS operation + virtual Status profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const 
*operation, + void *arguments, + void *host_workspace, + void *device_workspace); + + + /// Initialize reduction problem dimensions and library::Operation + bool initialize_reduction_configuration_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::ConvDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against host reference + bool verify_with_host_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against device reference + bool verify_with_device_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +#if CUTLASS_ENABLE_CUDNN + + /// Verifies CUTLASS against cudnn reference + bool verify_with_cudnn_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +#endif //#if CUTLASS_ENABLE_CUDNN + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv3d_operation_profiler.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv3d_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..130a661b8b9de9041faf18eedda98c61b2a17cec --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv3d_operation_profiler.h @@ -0,0 +1,449 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines profiling functionality for convolution + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/handle.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/singleton.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" +#include "reduction_operation_profiler.h" +#if CUTLASS_ENABLE_CUDNN +#include "cudnn_helpers.h" +#endif //#if CUTLASS_ENABLE_CUDNN +#include "debug.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class Conv3dOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct Conv3dProblem { + + int64_t n, d, h, w, c, z, p, q, k, t, r, s; + int64_t pad_d, pad_h, pad_w; + int64_t stride_d, stride_h, stride_w; + int64_t dilation_d, dilation_h, dilation_w; + + std::vector alpha; + std::vector beta; + + 
library::SplitKMode split_k_mode; + int64_t split_k_slices; + + library::ConvModeID conv_mode; + + library::Provider eq_gemm_provider; + + // convolution with parallel interleaved reduction + // convolution epilogue (alpha, beta) = (1.0, 0.0) + // reduction epilogue (alpha, beta) = (Conv3dProblem::alpha, Conv3dProblem::beta) + std::vector alpha_one; + std::vector beta_zero; + + // + // Methods + // + + /// Total number of bytes loaded + int64_t bytes(library::ConvDescription const &operation_desc) const; + + /// Total number of flops computed + int64_t flops(library::ConvDescription const &operation_desc) const; + + /// Infers output size from the input size, padding, stride, and dilation + void set_default_output_size() { + z = ((d + pad_d - t * dilation_d) / stride_d) + 1; + p = ((h + pad_h - r * dilation_h) / stride_h) + 1; + q = ((w + pad_w - s * dilation_w) / stride_w) + 1; + } + + // Returns equivalent gemm problem size for convolution + cutlass::gemm::GemmCoord eq_gemm_size(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return cutlass::gemm::GemmCoord(int(n * z * p * q), int(k), int(t * r * s * c)); + case library::ConvKind::kDgrad: return cutlass::gemm::GemmCoord(int(n * d * h * w), int(c), int(t * r * s * k)); + case library::ConvKind::kWgrad: return cutlass::gemm::GemmCoord(int(k), int(t * r * s * c), int(n * z * p * q)); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns extent for tensor A + std::vector extent_a(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return {int(n), int(d), int(h), int(w), int(c)}; + case library::ConvKind::kDgrad: return {int(n), int(z), int(p), int(q), int(k)}; + case library::ConvKind::kWgrad: return {int(n), int(z), int(p), int(q), int(k)}; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns extent for 
tensor B + std::vector extent_b(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return {int(k), int(t), int(r), int(s), int(c)}; + case library::ConvKind::kDgrad: return {int(k), int(t), int(r), int(s), int(c)}; + case library::ConvKind::kWgrad: return {int(n), int(d), int(h), int(w), int(c)}; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns extent for tensor C + std::vector extent_c(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return {int(n), int(z), int(p), int(q), int(k)}; + case library::ConvKind::kDgrad: return {int(n), int(d), int(h), int(w), int(c)}; + case library::ConvKind::kWgrad: return {int(k), int(t), int(r), int(s), int(c)}; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns layout for equivalent gemm matrix A + library::LayoutTypeID eq_gemm_layout_a(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return library::LayoutTypeID::kRowMajor; // TN Gemm + case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm + case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; // NT Gemm + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns layout for equivalent gemm matrix B + library::LayoutTypeID eq_gemm_layout_b(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return library::LayoutTypeID::kColumnMajor; // TN Gemm + case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm + case library::ConvKind::kWgrad: return library::LayoutTypeID::kRowMajor; // NT Gemm + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns layout for equivalent gemm matrix C + 
library::LayoutTypeID eq_gemm_layout_c(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + // Gemm operator assumes column-major output + case library::ConvKind::kFprop: + case library::ConvKind::kDgrad: + case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns leading dimension for equivalent gemm matrix A + int64_t eq_gemm_lda(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); + case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).k(); + case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns leading dimension for equivalent gemm matrix B + int64_t eq_gemm_ldb(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); + case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).n(); + case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).n(); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns leading dimension for equivalent gemm matrix C + int64_t eq_gemm_ldc(library::ConvKind const &conv_kind) const { + + switch (conv_kind) { + case library::ConvKind::kFprop: + case library::ConvKind::kDgrad: + case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + }; + + /// Workspace used + struct Conv2dWorkspace { + + /// Conv device allocations + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *C; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + /// Library configuration and arguments for convolution operator + 
library::Conv3dConfiguration configuration; + library::ConvArguments arguments; + + /// Number of copies of the problem workspace which are visited sequentially during + /// profiling to avoid camping in the last level cache. + int problem_count; + + /// Buffer used for the cutlass conv2d operations' host workspace + std::vector host_workspace; + + /// Buffer used for the cutlass operations' device workspace + DeviceAllocation device_workspace; + + /// Library configuration and arguments for reduction operator + library::ReductionConfiguration reduction_configuration; + library::ReductionArguments reduction_arguments; + + /// Buffer used for the cutlass reduction operations' host workspace + std::vector reduction_host_workspace; + + /// Host data buffers for host reference operation + /// host buffer for tensor + std::vector host_tensor_a; + + /// host buffer for tensor b + std::vector host_tensor_b; + + /// host buffer for tensor c + std::vector host_tensor_c; + + + // + // Methods + // + + Conv2dWorkspace(): + A(nullptr), B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { } + + // Returns stride vector for tensor A + std::vector stride_a(library::ConvKind const &conv_kind) { + return { + configuration.layout_a(conv_kind).stride()[0], + configuration.layout_a(conv_kind).stride()[1], + configuration.layout_a(conv_kind).stride()[2], + configuration.layout_a(conv_kind).stride()[3] + }; + } + + // Returns stride vector for tensor B + std::vector stride_b(library::ConvKind const &conv_kind) { + + return { + configuration.layout_b(conv_kind).stride()[0], + configuration.layout_b(conv_kind).stride()[1], + configuration.layout_b(conv_kind).stride()[2], + configuration.layout_b(conv_kind).stride()[3] + }; + } + + // Returns stride vector for tensor C + std::vector stride_c(library::ConvKind const &conv_kind) { + + return { + configuration.layout_c(conv_kind).stride()[0], + configuration.layout_c(conv_kind).stride()[1], + 
configuration.layout_c(conv_kind).stride()[2], + configuration.layout_c(conv_kind).stride()[3] + }; + } + }; + +protected: + + // + // Data members + // + + /// CONV problem obtained from problem space + Conv3dProblem problem_; + + /// Device memory allocations + Conv2dWorkspace conv_workspace_; + + /// CUTLASS parallel reduction operation to follow this* conv2d operation + library::Operation const *reduction_op_; + +public: + // + // Methods + // + + /// Ctor + Conv3dOperationProfiler(Options const &options); + + /// Destructor + virtual ~Conv3dOperationProfiler(); + + Conv3dProblem const& problem() const { return problem_; } + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Updates the arguments structure for the CUTLASS 
operator based on + /// the problem index. + void set_cutlass_operator_arguments_(int problem_idx = 0); + + /// Method to profile an initialized CUTLASS operation + virtual Status profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void *host_workspace, + void *device_workspace); + + /// Initialize reduction problem dimensions and library::Operation + bool initialize_reduction_configuration_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::ConvDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against host reference + bool verify_with_host_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against device reference + bool verify_with_device_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +#if CUTLASS_ENABLE_CUDNN + + /// Verifies CUTLASS against cudnn reference + bool verify_with_cudnn_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +#endif //#if CUTLASS_ENABLE_CUDNN + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cublas_helpers.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cublas_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..3f38adbcdc2792db5162f921d820b866bf80d27b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cublas_helpers.h @@ -0,0 +1,358 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Helper functions for mapping CUTLASS concepts to cuBLAS. +*/ + +#pragma once + +#if CUTLASS_ENABLE_CUBLAS +#include + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/blas3.h" + +#include "options.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Converts a cuBLAS status to cutlass::Status +Status get_cutlass_status(cublasStatus_t cublas); + +/// Converts a cuBLAS status to cutlass::profiler::Disposition +Disposition get_cutlass_disposition(cublasStatus_t cublas_status); + +/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation +bool get_cublas_transpose_operation( + cublasOperation_t &operation, + library::LayoutTypeID layout, + library::ComplexTransform transform = library::ComplexTransform::kNone); + +/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration +bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type); + +/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class +cublasGemmAlgo_t get_cublas_gemm_algo( + int cta_m, + int cta_n, + int cta_k, + 
library::OpcodeClassID opcode_class); + +/// Returns a status if cuBLAS can satisfy a particular GEMM description +Status cublas_satisfies(library::GemmDescription const &desc); + +/// Returns a status if cuBLAS can satisfy a particular RankK description +Status cublas_satisfies(library::RankKDescription const &desc); + +/// Returns a status if cuBLAS can satisfy a particular TRMM description +Status cublas_satisfies(library::TrmmDescription const &desc); + +/// Returns a status if cuBLAS can satisfy a particular SYMM/HEMM description +Status cublas_satisfies(library::SymmDescription const &desc); + +/// This is a helper class to create cublasHandle_t automatically on CublasCreate object creation and +/// to destroy cublasHandle_t on CublasCreate object destruction. +/// Additionally, it provides implicit cast from CublasCreate's object to cublasHandle_t's object +class CublasCreate { +private: + cublasHandle_t handle; + cublasStatus_t status; + +public: + CublasCreate() { + status = cublasCreate(&handle); + } + + ~CublasCreate() { + cublasDestroy(handle); + } + + /// Implicit cast CublasCreate object to cublasHandle_t + operator cublasHandle_t() const { return handle; } + + /// returns cublasStatus_t for handle creation + cublasStatus_t get_cublas_create_status() { return status; } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Selects one or more cuBLAS algorithms. 
+static void select_cublas_algorithms( + std::vector &algorithms, + Options const &options, + library::GemmDescription const &op_desc) { + + library::OpcodeClassID const & opcode_class = + op_desc.tile_description.math_instruction.opcode_class; + + switch (options.library.algorithm_mode) { + case AlgorithmMode::kMatching: + { + algorithms.push_back(get_cublas_gemm_algo( + op_desc.tile_description.threadblock_shape.m(), + op_desc.tile_description.threadblock_shape.n(), + op_desc.tile_description.threadblock_shape.k(), + opcode_class)); + break; + } + + case AlgorithmMode::kBest: + { + // Choose first enumerated mode. If none are enumerated, choose based on opcode class + // and evaluate all of them. + + if (options.library.algorithms.empty()) { + // Enumerate all algorithms + if (opcode_class == library::OpcodeClassID::kSimt) { + + for (int algo = CUBLAS_GEMM_DEFAULT; + algo <= CUBLAS_GEMM_ALGO23; + ++algo) { + + algorithms.push_back(cublasGemmAlgo_t(algo)); + } + } + else { + + for (int algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; + algo <= CUBLAS_GEMM_ALGO15_TENSOR_OP; + ++algo) { + + algorithms.push_back(cublasGemmAlgo_t(algo)); + } + } + } + else { + // Use the listed algorithms + algorithms.reserve(options.library.algorithms.size()); + + for (int algo : options.library.algorithms) { + algorithms.push_back(reinterpret_cast(algo)); + } + } + + break; + } + + case AlgorithmMode::kDefault: + { + + // Use the library's default algorithm + algorithms.push_back((opcode_class == library::OpcodeClassID::kSimt ? 
+ CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP)); + + break; + } + default: + { + break; + } + } +} + +/// Dispatcher to cublasGemmEx() +struct cublasGemmExDispatcher { + + // + // Data members + // + library::GemmUniversalConfiguration configuration; + library::GemmUniversalArguments arguments; + + // cublas-specific data structures to fill cublas API call arguments + cublasOperation_t trans_A; + cublasOperation_t trans_B; + cudaDataType_t data_type_A; + cudaDataType_t data_type_B; + cudaDataType_t data_type_C; + cudaDataType_t compute_data_type; + +#if (__CUDACC_VER_MAJOR__ >= 11) + cublasComputeType_t compute_type; +#endif + + cublasGemmAlgo_t algo; + Status status; + + // + // Methods + // + + cublasGemmExDispatcher( + library::GemmDescription const &op_desc, + library::GemmUniversalConfiguration configuration_, + library::GemmUniversalArguments arguments_, + cublasGemmAlgo_t algorithm = CUBLAS_GEMM_DFALT + ); + + /// Executes GEMM using these arguments + cublasStatus_t operator()(cublasHandle_t handle); +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Dispatcher to cublas rank k update kernels +struct cublasRankKDispatcher { + + // + // Data members + // + library::RankKConfiguration configuration; + library::RankKArguments arguments; + + // cublas-specific data structures to fill cublas API call arguments + cublasOperation_t trans_A; + cublasFillMode_t uplo; + cudaDataType_t data_type_A; + cudaDataType_t data_type_C; + cudaDataType_t compute_data_type; + +#if (__CUDACC_VER_MAJOR__ >= 11) + cublasComputeType_t compute_type; +#endif + + int num_ranks; //(rank-k or rank-2k) + BlasMode blas_mode; //(symmetric or hermitian) + Status status; + + // + // Methods + // + + cublasRankKDispatcher( + library::RankKDescription const &op_desc, + library::RankKConfiguration configuration_, + library::RankKArguments arguments_ + ); + + /// Executes RankK using these arguments + cublasStatus_t 
operator()(cublasHandle_t handle); +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Dispatcher to cublasTrmm() +struct cublasTrmmDispatcher { + + // + // Data members + // + library::TrmmConfiguration configuration; + library::TrmmArguments arguments; + + // cublas-specific data structures to fill cublas API call arguments + cublasOperation_t trans_A; + cublasSideMode_t side; + cublasFillMode_t uplo; + cublasDiagType_t diag; + cudaDataType_t data_type_A; + cudaDataType_t data_type_B; + cudaDataType_t data_type_D; + cudaDataType_t compute_data_type; + +#if (__CUDACC_VER_MAJOR__ >= 11) + cublasComputeType_t compute_type; +#endif + + Status status; + + // + // Methods + // + + cublasTrmmDispatcher( + library::TrmmDescription const &op_desc, + library::TrmmConfiguration configuration_, + library::TrmmArguments arguments_ + ); + + /// Executes TRMM using these arguments + cublasStatus_t operator()(cublasHandle_t handle); +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Dispatcher to cublas symm/hemm update kernels +struct cublasSymmDispatcher { + + // + // Data members + // + library::SymmConfiguration configuration; + library::SymmArguments arguments; + + // cublas-specific data structures to fill cublas API call arguments + cublasSideMode_t side; + cublasFillMode_t uplo; + cudaDataType_t data_type_A; + cudaDataType_t data_type_B; + cudaDataType_t data_type_C; + cudaDataType_t compute_data_type; + +#if (__CUDACC_VER_MAJOR__ >= 11) + cublasComputeType_t compute_type; +#endif + + BlasMode blas_mode; //(symmetric or hermitian) + Status status; + + // + // Methods + // + + cublasSymmDispatcher( + library::SymmDescription const &op_desc, + library::SymmConfiguration configuration_, + library::SymmArguments arguments_ + ); + + /// Executes Symm using these arguments + cublasStatus_t operator()(cublasHandle_t handle); +}; + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace detail + +} // namespace profiler +} // namespace cutlass + + +#endif // #if CUTLASS_ENABLE_CUBLAS diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cudnn_helpers.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cudnn_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..d5a9af7be504b6a74db26e3a8825cb463140eeeb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cudnn_helpers.h @@ -0,0 +1,590 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Helper functions for mapping CUTLASS concepts to cuDNN. + +*/ + +#pragma once +#if CUTLASS_ENABLE_CUDNN +#include +#include +#include +#include "cutlass/cutlass.h" +#include "cutlass/util/device_memory.h" +#include "cutlass/library/library.h" +#include "enumerated_types.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Converts a cuDNN status to cutlass::Status +Status get_cutlass_status(cudnnStatus_t cudnn_status); + +/// Converts a cuDNN status to cutlass::profiler::Disposition +Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status); + +/// Checks cudnnStatus_t converts to cutlas status and returns if Status::kSuccess o.w. 
throws exception +Status checkCudnnErr(cudnnStatus_t cudnn_status); + +/// Maps a CUTLASS conv mode to a cuDNN conv mode enumeration +bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode); + +/// Maps a CUTLASS layout type to a cuDNN data type enumeration +bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout); + +/// Maps a CUTLASS numeric type to a cuDNN data type enumeration +bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type); + +/// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type +bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc); + +/// Returns a status if cudnn can satisfy a particular Conv2d description +Status cudnn_satisfies(library::ConvDescription const &desc, library::Conv2dConfiguration const &configuration); + +/// Returns a status if cudnn can satisfy a particular Conv3d description +Status cudnn_satisfies(library::ConvDescription const &desc, library::Conv3dConfiguration const &configuration); + +/// Cudnn compute type seems to be hardcoded to float (To handle a possible cudnn issue) +float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src); + + +/// This is a helper class to create cudnnHandle_t automatically on CudnnCreate object creation and +/// to destroy cudnnHandle_t on CudnnCreate object destruction. 
+/// Additionally, it provides implicit cast from CudnnCreate's object to cudnnHandle_t's object +class CudnnCreate { +private: + cudnnHandle_t handle; + cudnnStatus_t status; + +public: + CudnnCreate() { + status = cudnnCreate(&handle); + } + + ~CudnnCreate() { + cudnnDestroy(handle); + } + + /// Implicit cast CudnnCreate object to cudnnHandle_t + operator cudnnHandle_t() const { return handle; } + + /// returns cudnnStatus_t for handle creation + cudnnStatus_t get_cudnn_create_status() { return status; } +}; + + +namespace detail { + +/// Dispatcher to cudnn convolution operators +struct cudnnConvDispatcher { + + // + // Data members + // + //library::Conv2dConfiguration configuration; + library::ConvArguments arguments; + library::ConvKind conv_kind; + + // cudnn-specific data structures to fill cudnn API call arguments + // cudnn activation, filter, and output descriptors + cudnnTensorDescriptor_t activation_desc; + cudnnFilterDescriptor_t filter_desc; + cudnnTensorDescriptor_t output_desc; + cudnnConvolutionDescriptor_t conv_desc; + + // cudnn datatypes + cudnnDataType_t data_type_activation; + cudnnDataType_t data_type_filter; + cudnnDataType_t data_type_output; + + // cudnn layouts + cudnnTensorFormat_t layout_activation; + cudnnTensorFormat_t layout_filter; + cudnnTensorFormat_t layout_output; + + // cudnn convolution mode + cudnnConvolutionMode_t conv_mode; + + // cudnn math type (tensorop, tensorop with conversion, simt) + cudnnMathType_t math_type; + + // cudnn compute data type + cudnnDataType_t compute_type; + + // cudnn compute type seems to be hardcoded to float (to handle a possible a cudnn issue) + float alpha; + float beta; + + // cudnn workspace + size_t workspace_size_in_bytes = 0; + cutlass::device_memory::allocation workspace; + + // select cudnn's implicit gemm precomputed algorithm with tensor operations + static cudnnConvolutionFwdAlgo_t const fprop_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; + static 
cudnnConvolutionBwdDataAlgo_t const dgrad_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; + static cudnnConvolutionBwdFilterAlgo_t const wgrad_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; + + Status status; + + // + // Methods + // + + // TODO: unify ctor cudnnConvDispatcher for conv2d and conv3d by unifying Conv2dConfiguration + + // ctor for conv2d + cudnnConvDispatcher( + library::ConvDescription const &op_desc, + library::Conv2dConfiguration configuration, + library::ConvArguments arguments_, + cudnnHandle_t handle + ): + //configuration(configuration_), + arguments(arguments_), + conv_kind(op_desc.conv_kind), + status(Status::kSuccess) { + + bool good = true; + + // Get cudnn datatype, layout, and convolution mode from library::ConvDescription + good = (good && get_cudnn_datatype(data_type_activation, op_desc.A.element)); + good = (good && get_cudnn_datatype(data_type_filter, op_desc.B.element)); + good = (good && get_cudnn_datatype(data_type_output, op_desc.C.element)); + good = (good && get_cudnn_layout(layout_activation, op_desc.A.layout)); + good = (good && get_cudnn_layout(layout_filter, op_desc.B.layout)); + good = (good && get_cudnn_layout(layout_output, op_desc.C.layout)); + good = (good && get_cudnn_conv_mode(conv_mode, configuration.problem_size.mode)); + // Get cudnn mathtype (cudnnMathType_t) + good = (good && get_cudnn_mathtype(math_type, op_desc)); + good = (good && get_cudnn_datatype( + compute_type, + op_desc.tile_description.math_instruction.element_accumulator)); + // Check cutlass Conv2d description has equivalent operator in cudnn + if (!good) { + status = Status::kErrorNotSupported; + return; + } + // cudnn compute type seems to be hardcoded to float (to handle a possible a cudnn issue) + alpha = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.alpha); + beta = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.beta); + + // Create convolution descriptor object + status = 
get_cutlass_status(cudnnCreateConvolutionDescriptor(&conv_desc)); + + // Configure convolution operator + std::vector padding {configuration.problem_size.pad_h, configuration.problem_size.pad_w}; + std::vector stride {configuration.problem_size.stride_h, configuration.problem_size.stride_w}; + std::vector dilation {configuration.problem_size.dilation_h, configuration.problem_size.dilation_w}; + + status = get_cutlass_status( + cudnnSetConvolutionNdDescriptor( + conv_desc, + op_desc.conv_dim, + padding.data(), + stride.data(), + dilation.data(), + conv_mode, + compute_type + )); + + // Set groups + status = get_cutlass_status(cudnnSetConvolutionGroupCount(conv_desc, configuration.problem_size.groups)); + + // Create activation, filter, and output descriptor objects + status = get_cutlass_status(cudnnCreateTensorDescriptor(&activation_desc)); + status = get_cutlass_status(cudnnCreateFilterDescriptor(&filter_desc)); + status = get_cutlass_status(cudnnCreateTensorDescriptor(&output_desc)); + + // Set activation, filter, and output descriptor + status = get_cutlass_status( + cudnnSetTensor4dDescriptor( + activation_desc, + layout_activation, + data_type_activation, + configuration.problem_size.N, + configuration.problem_size.C, + configuration.problem_size.H, + configuration.problem_size.W + )); + + status = get_cutlass_status( + cudnnSetFilter4dDescriptor( + filter_desc, + data_type_filter, + layout_filter, + configuration.problem_size.K, + configuration.problem_size.C / configuration.problem_size.groups, + configuration.problem_size.R, + configuration.problem_size.S + )); + + status = get_cutlass_status( + cudnnSetTensor4dDescriptor( + output_desc, + layout_output, + data_type_output, + configuration.problem_size.N, + configuration.problem_size.K, + configuration.problem_size.P, + configuration.problem_size.Q + )); + + // Set math instruction to tensor op + status = get_cutlass_status( + cudnnSetConvolutionMathType(conv_desc, math_type)); + + // Initialize workspace + 
switch (conv_kind) { + case library::ConvKind::kFprop: + status = get_cutlass_status( + cudnnGetConvolutionForwardWorkspaceSize( + handle, + activation_desc, + filter_desc, + conv_desc, + output_desc, + fprop_algo, + &workspace_size_in_bytes + )); break; + case library::ConvKind::kDgrad: + status = get_cutlass_status( + cudnnGetConvolutionBackwardDataWorkspaceSize( + handle, + filter_desc, + output_desc, + conv_desc, + activation_desc, + dgrad_algo, + &workspace_size_in_bytes + )); break; + case library::ConvKind::kWgrad: + status = get_cutlass_status( + cudnnGetConvolutionBackwardFilterWorkspaceSize( + handle, + activation_desc, + output_desc, + conv_desc, + filter_desc, + wgrad_algo, + &workspace_size_in_bytes + )); break; + + } + + workspace = cutlass::device_memory::allocation(workspace_size_in_bytes); + } + + + // ctor for conv3d + cudnnConvDispatcher( + library::ConvDescription const &op_desc, + library::Conv3dConfiguration configuration, + library::ConvArguments arguments_, + cudnnHandle_t handle + ): + //configuration(configuration_), + arguments(arguments_), + conv_kind(op_desc.conv_kind), + status(Status::kSuccess) { + + bool good = true; + + // Get cudnn datatype, layout, and convolution mode from library::ConvDescription + good = (good && get_cudnn_datatype(data_type_activation, op_desc.A.element)); + good = (good && get_cudnn_datatype(data_type_filter, op_desc.B.element)); + good = (good && get_cudnn_datatype(data_type_output, op_desc.C.element)); + + good = (good && get_cudnn_layout(layout_activation, op_desc.A.layout)); + good = (good && get_cudnn_layout(layout_filter, op_desc.B.layout)); + good = (good && get_cudnn_layout(layout_output, op_desc.C.layout)); + + good = (good && get_cudnn_conv_mode(conv_mode, configuration.problem_size.mode)); + + // cudnn compute type seems to be hardcoded to float (to handle a possible a cudnn issue) + alpha = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.alpha); + beta = 
cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.beta); + + good = (good && get_cudnn_datatype( + compute_type, + op_desc.tile_description.math_instruction.element_accumulator)); + + // Check cutlass Conv2d description has equivalent operator in cudnn + if (!good) { + status = Status::kErrorNotSupported; + } + + // Create convolution descriptor object + status = get_cutlass_status(cudnnCreateConvolutionDescriptor(&conv_desc)); + + // Configure convolution operator + std::vector padding {configuration.problem_size.pad_d, configuration.problem_size.pad_h, configuration.problem_size.pad_w}; + std::vector stride {configuration.problem_size.stride_d, configuration.problem_size.stride_h, configuration.problem_size.stride_w}; + std::vector dilation {configuration.problem_size.dilation_d, configuration.problem_size.dilation_h, configuration.problem_size.dilation_w}; + + status = get_cutlass_status( + cudnnSetConvolutionNdDescriptor( + conv_desc, + op_desc.conv_dim, + padding.data(), + stride.data(), + dilation.data(), + conv_mode, + compute_type + )); + + // Set groups + status = get_cutlass_status(cudnnSetConvolutionGroupCount(conv_desc, configuration.problem_size.groups)); + + // Create activation, filter, and output descriptor objects + status = get_cutlass_status(cudnnCreateTensorDescriptor(&activation_desc)); + status = get_cutlass_status(cudnnCreateFilterDescriptor(&filter_desc)); + status = get_cutlass_status(cudnnCreateTensorDescriptor(&output_desc)); + + // Set activation descriptor + std::vector activation_extent { + configuration.problem_size.N, + configuration.problem_size.C, + configuration.problem_size.D, + configuration.problem_size.H, + configuration.problem_size.W + }; + + std::vector activation_stride { + configuration.layout_activations.stride()[3], + 1, + configuration.layout_activations.stride()[2], + configuration.layout_activations.stride()[1], + configuration.layout_activations.stride()[0] + }; + + status = get_cutlass_status( + 
cudnnSetTensorNdDescriptor( + activation_desc, + data_type_activation, + op_desc.conv_dim + 2, + activation_extent.data(), + activation_stride.data() + )); + + // Set filter descriptor + std::vector filter_extent { + configuration.problem_size.K, + configuration.problem_size.C, + configuration.problem_size.T, + configuration.problem_size.R, + configuration.problem_size.S + }; + + std::vector filter_stride { + configuration.layout_filters.stride()[3], + 1, + configuration.layout_filters.stride()[2], + configuration.layout_filters.stride()[1], + configuration.layout_filters.stride()[0] + }; + + status = get_cutlass_status( + cudnnSetFilterNdDescriptor( + filter_desc, + data_type_filter, + layout_filter, + op_desc.conv_dim + 2, + filter_extent.data() + )); + + + // Set output descriptor + std::vector output_extent { + configuration.problem_size.N, + configuration.problem_size.K, + configuration.problem_size.Z, + configuration.problem_size.P, + configuration.problem_size.Q + }; + + std::vector output_stride { + configuration.layout_output.stride()[3], + 1, + configuration.layout_output.stride()[2], + configuration.layout_output.stride()[1], + configuration.layout_output.stride()[0] + }; + + status = get_cutlass_status( + cudnnSetTensorNdDescriptor( + output_desc, + data_type_output, + op_desc.conv_dim + 2, + output_extent.data(), + output_stride.data() + )); + + // Set math instruction to tensor op + status = get_cutlass_status( + cudnnSetConvolutionMathType(conv_desc, math_type)); + + // Initialize workspace + switch (conv_kind) { + case library::ConvKind::kFprop: + status = get_cutlass_status( + cudnnGetConvolutionForwardWorkspaceSize( + handle, + activation_desc, + filter_desc, + conv_desc, + output_desc, + fprop_algo, + &workspace_size_in_bytes + )); break; + case library::ConvKind::kDgrad: + status = get_cutlass_status( + cudnnGetConvolutionBackwardDataWorkspaceSize( + handle, + filter_desc, + output_desc, + conv_desc, + activation_desc, + dgrad_algo, + 
&workspace_size_in_bytes + )); break; + case library::ConvKind::kWgrad: + status = get_cutlass_status( + cudnnGetConvolutionBackwardFilterWorkspaceSize( + handle, + activation_desc, + output_desc, + conv_desc, + filter_desc, + wgrad_algo, + &workspace_size_in_bytes + )); break; + + } + + workspace = cutlass::device_memory::allocation(workspace_size_in_bytes); + } + + /// Executes Conv2d operator from cudnn library + cudnnStatus_t operator()(cudnnHandle_t handle) { + + switch (conv_kind) { + case library::ConvKind::kFprop: + return cudnnConvolutionForward( + handle, + &alpha, + activation_desc, + activation(), + filter_desc, + filter(), + conv_desc, + fprop_algo, + workspace.get(), + workspace_size_in_bytes, + &beta, + output_desc, + arguments.D + ); + case library::ConvKind::kDgrad: + return cudnnConvolutionBackwardData( + handle, + &alpha, + filter_desc, + filter(), + output_desc, + output(), + conv_desc, + dgrad_algo, + workspace.get(), + workspace_size_in_bytes, + &beta, + activation_desc, + arguments.D + ); + case library::ConvKind::kWgrad: + return cudnnConvolutionBackwardFilter( + handle, + &alpha, + activation_desc, + activation(), + output_desc, + output(), + conv_desc, + wgrad_algo, + workspace.get(), + workspace_size_in_bytes, + &beta, + filter_desc, + arguments.D + ); + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns Activation Tensor + void const * activation() const { + switch(conv_kind) { + case library::ConvKind::kFprop : return arguments.A; + case library::ConvKind::kDgrad : return arguments.C; + case library::ConvKind::kWgrad : return arguments.B; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns Filter Tensor + void const *filter() const { + switch(conv_kind) { + case library::ConvKind::kFprop : return arguments.B; + case library::ConvKind::kDgrad : return arguments.B; + case library::ConvKind::kWgrad : return arguments.C; + default 
: throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } + + // Returns Output Tensor + void const *output() const { + switch(conv_kind) { + case library::ConvKind::kFprop : return arguments.C; + case library::ConvKind::kDgrad : return arguments.A; + case library::ConvKind::kWgrad : return arguments.A; + default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); + } + } +}; + +} // namespace detail +///////////////////////////////////////////////////////////////////////////////////////////////// +#endif //#if CUTLASS_ENABLE_CUDNN +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cutlass_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cutlass_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..a3b06409622a34a722b89fcaf6441e030b8775b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cutlass_profiler.h @@ -0,0 +1,96 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment +*/ + +#pragma once +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" +#include "cutlass/library/singleton.h" + +#include "options.h" +#include "operation_profiler.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// CUTLASS Profiler application +class CutlassProfiler { +private: + + // + // Data members + // + + /// Performance testbench options + Options options_; + + /// Entry points for each operation + OperationProfilerVector operation_profilers_; + +private: + + /// Prints usage + void print_usage_(std::ostream &); + + /// Prints usage + void print_options_(std::ostream &); + + /// 
Initializes the device + void initialize_device_(); + + /// Enumerates all operations + void enumerate_(); + + /// Profiles all operations + int profile_(); + +public: + + CutlassProfiler(Options const &options); + ~CutlassProfiler(); + + /// Invokes profiling operations + int operator()(); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/debug.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..cd80c46ffe0daff7ebad703863df38e9e50a8e1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/debug.h @@ -0,0 +1,56 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief +*/ + +#pragma once + +#include + +//#define report(x) { std::cout << "\033[31m" << __FILE__ << ":" << __LINE__ << " " << x << "\033[0m" << std::endl; } +//#define report(x) {} + +// Enable/Disable Profiler debug prints +//#define DEBUG_PROFILER + +//RED 31m // profiler prints debug messages in red +//YELLOW 33m // ir prints debug messages in yellow + +#ifndef DEBUG_PROFILER +#define debugprof(...) +#else +#define debugprof(...) 
do { \ + printf("\033[33m[DEBUG PROF] %s:%d | ", __FILE__, __LINE__); \ + printf(__VA_ARGS__); \ + printf("\033[0m\n"); \ + } while (0) +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_allocation.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_allocation.h new file mode 100644 index 0000000000000000000000000000000000000000..b5b3ee4af5aa243b4775cda2c80c46482283bfce --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_allocation.h @@ -0,0 +1,232 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment +*/ + +#pragma once + +#include +#include +#include + +#include "cutlass/library/library.h" +#include "cutlass/util/distribution.h" + +#include "enumerated_types.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Device memory allocation +class DeviceAllocation { +private: + + /// Data type of contained elements + library::NumericTypeID type_; + + /// Gets the stride between elements + size_t batch_stride_; + + /// Capacity in elements of device allocation + size_t capacity_; + + /// Pointer to device memory + void *pointer_; + + /// Layout type ID + library::LayoutTypeID layout_; + + /// Stride vector + std::vector stride_; + + /// Extent vector + std::vector extent_; + + /// Support allocating a 'batch' of non-overlapping tensors in contiguous memory + int batch_count_; + + /// Buffer holding TensorRef instance to recently allocated memory + std::vector tensor_ref_buffer_; + +public: + // + // Static member functions + // + + /// Determines the number of bytes needed to represent this numeric type + static size_t bytes(library::NumericTypeID type, size_t capacity); + + /// Returns the 
stride of a packed layout + static std::vector get_packed_layout( + library::LayoutTypeID layout_id, + std::vector const &extent); + + /// returns the capacity needed + static size_t construct_layout( + void *bytes, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector &stride); + + /// Returns true if two blocks have exactly the same value + static bool block_compare_equal( + library::NumericTypeID numeric_type, + void const *ptr_A, + void const *ptr_B, + size_t capacity); + + /// Returns true if two blocks have approximately the same value + static bool block_compare_relatively_equal( + library::NumericTypeID numeric_type, + void const *ptr_A, + void const *ptr_B, + size_t capacity, + double epsilon, + double nonzero_floor); + +public: + // + // Methods + // + + DeviceAllocation(); + + DeviceAllocation(library::NumericTypeID type, size_t capacity); + + DeviceAllocation( + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride = std::vector(), + int batch_count = 1); + + ~DeviceAllocation(); + + DeviceAllocation &reset(); + + /// Allocates device memory of a given type and capacity + DeviceAllocation &reset(library::NumericTypeID type, size_t capacity); + + /// Allocates memory for a given layout and tensor + DeviceAllocation &reset( + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride = std::vector(), + int batch_count = 1); + + /// Returns a buffer owning the tensor reference + std::vector &tensor_ref() { + return tensor_ref_buffer_; + } + + bool good() const; + + /// Data type of contained elements + library::NumericTypeID type() const; + + /// Pointer to start of device memory allocation + void *data() const; + + /// Pointer to the first element of a batch + void *batch_data(int batch_idx) const; + + /// Gets the layout type + library::LayoutTypeID layout() const; + + /// Gets the stride vector + 
std::vector const & stride() const; + + /// Gets the extent vector + std::vector const & extent() const; + + /// Gets the number of adjacent tensors in memory + int batch_count() const; + + /// Gets the stride (in units of elements) between items + int64_t batch_stride() const; + + /// Gets the stride (in units of bytes) between items + int64_t batch_stride_bytes() const; + + /// Capacity of allocation in number of elements + size_t capacity() const; + + /// Capacity of allocation in bytes + size_t bytes() const; + + /// Initializes a device allocation to a random distribution using cuRAND + void initialize_random_device(int seed, Distribution dist); + + /// Initializes a host allocation to a random distribution using std::cout + void initialize_random_host(int seed, Distribution dist); + + /// Initializes a device allocation to a sequential distribution + void initialize_sequential_device(Distribution dist); + + /// Initializes a host allocation to a sequential distribution + void initialize_sequential_host(Distribution dist); + + /// Initializes a device allocation to a random distribution using cuRAND + void initialize_random_sparsemeta_device(int seed, int MetaSizeInBits); + + /// Initializes a host allocation to a random distribution using std::cout + void initialize_random_sparsemeta_host(int seed, int MetaSizeInBits); + + /// Uniformly fills a tensor with a value when provided o.w. 
zero + void fill(double value); + + /// Copies from an equivalent-sized tensor in device memory + void copy_from_device(void const *ptr); + + /// Copies from an equivalent-sized tensor in device memory + void copy_from_host(void const *ptr); + + /// Copies from an equivalent-sized tensor in device memory + void copy_to_host(void *ptr); + + /// Writes a tensor to csv + void write_tensor_csv(std::ostream &out); +}; + +using DeviceAllocationList = std::list; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_context.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_context.h new file mode 100644 index 0000000000000000000000000000000000000000..1f21dc3c645f0dc50a9a7d25c0a6d794b4d40878 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_context.h @@ -0,0 +1,130 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief +*/ + +#pragma once + +#include +#include + + +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" + +#include "options.h" +#include "device_allocation.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Collection of allocations on the device +class DeviceContext { +public: + + // + // Type definitions + // + using AllocationMap = std::map; + +private: + // + // Data members + // + + /// Memory allocations that exist (owning) + DeviceAllocationList device_memory_; + + /// Non-owning set of named allocations + AllocationMap allocations_; + +public: + + /// Allocates memory of a given type, capacity (elements), and name + DeviceAllocation *allocate_block( + std::string const &name, + library::NumericTypeID type, + size_t capacity); + + /// 
Allocates memory of a given type, capacity (elements), and name + DeviceAllocation *allocate_tensor( + std::string const &name, + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride = std::vector(), + int batch_count = 1); + + /// Allocates memory of a given type, capacity (elements), and name + DeviceAllocation *allocate_tensor( + Options const &options, + std::string const &name, + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride, + int batch_count, + int seed_shift = 0); + + /// Allocates memory for sparse meta data + DeviceAllocation *allocate_sparsemeta_tensor( + Options const &options, + std::string const &name, + library::NumericTypeID type, + library::LayoutTypeID layout_id, + library::NumericTypeID type_a, + std::vector const &extent, + std::vector const &stride, + int batch_count, + int seed_shift = 0); + + /// Clears named allocations (but does not necessarily free memory) + void clear(); + + /// Frees all device memory allocations + void free(); + + /// Gets the allocation by name + DeviceAllocation &at(std::string const &name); + + size_t size() const; + + AllocationMap::iterator begin(); + AllocationMap::iterator end(); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4d913243b62b08bce805392457faaae3635b19fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h @@ -0,0 +1,169 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Provides several functions for filling tensors with data. 
+*/ + +#pragma once + +#include +#include +#include +#include +#include "cutlass/library/library.h" + +#define TRACE(x) { std::cout << __FILE__ << ":" << __LINE__ << " " << x << std::endl; } + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +T from_string(std::string const &); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumerated type describing how the performance testbench evaluates kernels. +enum class ExecutionMode { + kProfile, ///< regular verification and profiling + kDryRun, ///< no kernels are launched or workspaces allocated; used to assess what operators might be launched + kEnumerate, ///< no kernels launched or workspaces allocated; lists all operation kind and operations + kTrace, ///< executes a single device-side computation with no other kernel launches + kInvalid +}; + +/// Converts a ExecutionMode enumerant to a string +char const *to_string(ExecutionMode mode, bool pretty = false); + +/// Parses a ExecutionMode enumerant from a string +template <> +ExecutionMode from_string(std::string const &str); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Library algorithm mode +enum class AlgorithmMode { + kMatching, ///< compare against best matching algorithm + kBest, ///< evaluate all library algorithms and report best + kDefault, ///< use the library's default algorithm option + kInvalid +}; + +/// Converts a ExecutionMode enumerant to a string +char const *to_string(AlgorithmMode mode, bool pretty = false); + +/// Parses a ExecutionMode enumerant from a string +template <> +AlgorithmMode from_string(std::string const &str); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Outcome of a performance test +enum class Disposition { + kPassed, + kFailed, + kNotRun, + 
kIncorrect, + kNotVerified, + kInvalidProblem, + kNotSupported, + kInvalid +}; + +/// Converts a Disposition enumerant to a string +char const *to_string(Disposition disposition, bool pretty = false); + +/// Parses a Disposition enumerant from a string +template <> +Disposition from_string(std::string const &str); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Indicates when to save +enum class SaveWorkspace { + kNever, + kIncorrect, + kAlways, + kInvalid +}; + +/// Converts a SaveWorkspace enumerant to a string +char const *to_string(SaveWorkspace save_option, bool pretty = false); + +/// Parses a SaveWorkspace enumerant from a string +template <> +SaveWorkspace from_string(std::string const &str); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Indicates the type of kernel argument +// ArgumentType can be both ScalarType or NumericType. Thus, enums kScalar and kNumeric +// 1) kScalar: e.g. of a Scalar ArgumentType is u32 is a Scalar type. +// Its c++ equivalent as "type name = initializer" is "u32 m = 32" +// 2) kNumeric: e.g. of a Numeric ArgumentType is NumericTypeID is a Numeric type. 
+// Its c++ equivalent as "type name = initializer" is "NumericTypeID numeric_type = u32" +enum class ArgumentTypeID { + kScalar, + kInteger, + kTensor, + kBatchedTensor, + kStructure, + kEnumerated, + kInvalid +}; + +/// Converts a ArgumentTypeID enumerant to a string +char const *to_string(ArgumentTypeID type, bool pretty = false); + +/// Parses a ArgumentTypeID enumerant from a string +template <> +ArgumentTypeID from_string(std::string const &str); + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Profiler typedefs +using ProviderVector = std::vector; +using DispositionMap = std::map; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Print vector for the report +template +std::ostream& operator<< (std::ostream& out, const std::vector& v) { + for(int i = 0; i < v.size(); ++i) { + out << to_string(v[i], true) << (i+1 != v.size() ? "," : ""); + } + return out; +} +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..28914a69e7e78c64f8b88b6b51a15e0d21d169fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h @@ -0,0 +1,275 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines a math function +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" +#include "reduction_operation_profiler.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class GemmOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct GemmProblem { + + cutlass::library::GemmUniversalMode mode; + + int64_t m; + int64_t n; + int64_t k; + int64_t lda; + int64_t ldb; + int64_t ldc; + std::vector alpha; + std::vector beta; + + cutlass::library::SplitKMode split_k_mode; + int split_k_slices; + int batch_count; + + cutlass::library::RasterOrder raster_order; + // gemm with parallel interleaved reduction + // gemm epilogue (alpha, beta) = (1.0, 0.0) + // reduction epilogue (alpha, beta) = (GemmProblem::alpha, GemmProblem::beta) + std::vector alpha_one; + std::vector beta_zero; + + // + // Methods + // + + GemmProblem(): + mode(library::GemmUniversalMode::kGemm), + m(16), n(16), k(16), lda(0), ldb(0), ldc(0), split_k_slices(1), batch_count(1), + raster_order(cutlass::library::RasterOrder::kHeuristic){ } + + /// Parses the problem + Status parse( + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Total number of bytes loaded + int64_t 
bytes(library::GemmDescription const &operation_desc) const; + + /// Total number of flops computed + int64_t flops(library::GemmDescription const &operation_desc) const; + + /// Initializes a performance result + void initialize_result( + PerformanceResult &result, + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space); + }; + + /// Workspace used + struct GemmWorkspace { + + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *C; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + /// Number of copies of the problem workspace which are visited sequentially during + /// profiling to avoid camping in the last level cache. + int problem_count; + + library::GemmUniversalConfiguration configuration; + library::GemmUniversalArguments arguments; + + /// Buffer used for the operation's host workspace + std::vector host_workspace; + + /// Buffer used for the operations' device workspace + DeviceAllocation device_workspace; + + /// Library configuration and arguments for reduction operator + library::ReductionConfiguration reduction_configuration; + library::ReductionArguments reduction_arguments; + + /// Buffer used for the cutlass reduction operations' host workspace + std::vector reduction_host_workspace; + + // + // Methods + // + + GemmWorkspace(): + A(nullptr), B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr), problem_count(1) { } + }; + +protected: + + // + // Data members + // + + /// GEMM problem obtained from problem space + GemmProblem problem_; + + /// Device memory allocations + GemmWorkspace gemm_workspace_; + + /// CUTLASS parallel reduction operation to follow this* gemm operation + library::Operation const *reduction_op_; + +public: + // + // Methods + // + + /// Ctor + GemmOperationProfiler(Options const &options); + + /// Destructor + virtual ~GemmOperationProfiler(); + + GemmProblem const& problem() const { return problem_; } + + /// Prints usage statement for the math function + 
virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against references + bool verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against host and device references + bool verify_with_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + 
ProblemSpace::Problem const &problem); + + /// Method to profile a CUTLASS Operation + Status profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void *host_workspace, + void *device_workspace); + + /// Initialize reduction problem dimensions and library::Operation + bool initialize_reduction_configuration_( + library::Operation const *operation, + ProblemSpace::Problem const &problem); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gpu_timer.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gpu_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..a3d3befd2c84f7cd17962cc07ce75c21ea752b56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gpu_timer.h @@ -0,0 +1,72 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines a math function +*/ + +#pragma once + +#include +#include "cutlass/cutlass.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct GpuTimer { + + cudaEvent_t events[2]; + + // + // Methods + // + + GpuTimer(); + ~GpuTimer(); + + /// Records a start event in the stream + void start(cudaStream_t stream = nullptr); + + /// Records a stop event in the stream + void stop(cudaStream_t stream = nullptr); + + /// Records a stop event in the stream and synchronizes on the stream + void stop_and_wait(cudaStream_t stream = nullptr); + + /// Returns the duration in milliseconds + double duration(int iterations = 1) const; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace 
cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..92bb41e3595db9f98de5a2c8112b620af64c4539 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/operation_profiler.h @@ -0,0 +1,259 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines a math function +*/ + +#pragma once + +#include +#include +#include +#include + +// CUTLASS includes +#include "cutlass/trace.h" + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "performance_result.h" +#include "performance_report.h" +#include "problem_space.h" +#include "debug.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class OperationProfiler { +public: + + +protected: + // + // Data members + // + + /// Top-level operation kind + library::OperationKind kind_; + + /// Human readable description + std::string description_; + + /// Arguments parsed from command line + ArgumentDescriptionVector arguments_; + + /// List of providers used to verify and compare each result + ProviderVector verification_providers_; + + /// Model performance result initialized by the operation profiler with workload statistics + /// and reasonable default state. 
+ PerformanceResult model_result_; + + /// Performance result vector constructed by profiling the operation + PerformanceResultVector results_; + +public: + + // + // Methods + // + + /// Ctor + OperationProfiler(); + + OperationProfiler( + Options const &options, + library::OperationKind kind, + ArgumentDescriptionVector const &arguments = ArgumentDescriptionVector(), + ProviderVector const & verification_providers = ProviderVector()); + + /// Destructor + virtual ~OperationProfiler(); + + /// Obtains the operation kind + library::OperationKind kind() const { return kind_; } + + /// Gets the schema description + std::string const &description() const; + + /// Returns a reference to the arguments + ArgumentDescriptionVector const &arguments() const { return arguments_; } + +public: + + // + // Basic overrides + // + + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const =0; + + /// Entry point to profile all operations in the manifest + virtual int profile_all( + Options const &options, + library::Manifest const &manifest, + DeviceContext &device_context); + +public: + + // + // Operation-specific phases of verification and profiling + // + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) = 0; + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) = 0; + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + 
DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) = 0; + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) = 0; + +public: + + // + // Static helpers + // + + /// Sleep for a given duration in ms + static void sleep(int sleep_duration); + + /// Returns true if the current operation description satisfies the problem space + static bool satisfies( + library::OperationDescription const &op_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Compares tensors for equality + static Disposition compare_tensors( + Options const &options, + DeviceAllocation &experimental, + DeviceAllocation &reference, + int64_t count = 0); + + static void save_workspace( + DeviceContext &device_context, + Options const &options, + library::OperationDescription const &desc, + library::Provider provider, + library::Provider verification_provider = library::Provider::kInvalid); + + /// Helper to set a performance result member + static void set_argument( + PerformanceResult &result, + char const *name, + ProblemSpace const &problem_space, + std::string const &value); + + /// Helper to set a performance result member + static void set_argument( + PerformanceResult &result, + char const *name, + ProblemSpace const &problem_space, + int64_t value); + +protected: + + /// Sets operation description + static void initialize_result_( + PerformanceResult &result, + library::OperationDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Method to profile an initialized CUTLASS operation + virtual Status profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void 
*host_workspace, + void *device_workspace); + +private: + /// finds string matches filter_string in operation_name + bool find_string_matches_( + std::string const &filter_string, + std::string const &operation_name); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Vector of owning operation profilers +using OperationProfilerVector = std::vector>; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/options.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/options.h new file mode 100644 index 0000000000000000000000000000000000000000..c57025fca28984309d90fea515fc05c5b347005a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/options.h @@ -0,0 +1,334 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Command line options for performance test program +*/ + +#pragma once + +#include +#include +#include + +#include + +#include "cutlass/util/command_line.h" +#include "cutlass/util/distribution.h" +#include "cutlass/library/library.h" + +#include "enumerated_types.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Global options +class Options { +public: + + /// Cublas and cuDNN options + struct Library { + + // + // Data members + // + + /// Algorithm mode + AlgorithmMode algorithm_mode; + + /// Algorithm enumerants + std::vector algorithms; + + // + // Methods + // + + Library(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + }; + + /// Options related to the selected 
device + struct Device { + + /// Device ID + int device; + + /// CUDA Device properties + cudaDeviceProp properties; + + /// Total memory allocation on device + size_t maximum_capacity; + + // + // Methods + // + + Device(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + void print_device_info(std::ostream &out) const; + + /// Returns the compute capability of the listed device (e.g. 61, 60, 70, 75) + int compute_capability() const; + }; + + /// Options related to initializing input tensors + struct Initialization { + + /// If true, data is initialized randomly. If false, no initialization is performed after + /// allocating tensors. + bool enabled; + + /// If true, data distribution is set by the user and is not allowed to change + /// If false, data distribution is allowed to change based on element_type (library::NumericTypeID) + bool fix_data_distribution; + + /// Data distribution for input tensors + Distribution data_distribution; + + /// Source of random tensor elements + library::Provider provider; + + /// Random number generator seed. + int seed; + + // + // Methods + // + + Initialization(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + + /// Helper to parse a Distribution object from the command line parser + static void get_distribution( + cutlass::CommandLine const &args, + std::string const &arg, + cutlass::Distribution &dist); + }; + + /// Options related to verification of the result + struct Verification { + + // + // Data members + // + + /// If true, kernels are verified before they are profiled + bool enabled; + + /// If true, causes profiler to return an error code if no reference check is run. + /// Only valid when verification is enabled. 
+ bool required; + + /// Relative error threshold - zero to require bit-level consistency + double epsilon; + + /// Values smaller than this are assumed to be zero + double nonzero_floor; + + /// List of providers used to verify each result + ProviderVector providers; + + /// Indicates when to save the workspace + SaveWorkspace save_workspace; + + // + // Methods + // + + Verification(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + + /// Returns true if a provider is enabled + bool provider_enabled(library::Provider provider) const; + + /// Returns the index of a provider if its enabled + size_t index(library::Provider provider) const; + }; + + /// Options related to profiling + struct Profiling { + + /// Number of workspaces to rotate through to avoid cache-resident working sets + int workspace_count; + + /// Number of iterations to warmup each kernel prior to profiling + int warmup_iterations; + + /// Number of iterations to profile each kernel - if 0, kernels are launched up to the profiling duration + int iterations; + + /// Number of ms to sleep between profiling periods (ms) + int sleep_duration; + + /// If true, profiling is actually conducted. + bool enabled; + + /// If true, profiling returns an error code if no kernels are found to match the filters. 
+ bool error_on_no_match = false; + + /// List of providers of each functionality to be profiled + ProviderVector providers; + + // + // Methods + // + + Profiling(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + + /// Returns true if a provider is enabled + bool provider_enabled(library::Provider provider) const; + + /// Returns the index of a provider if its enabled + size_t index(library::Provider provider) const; + }; + + /// Options related to reporting + struct Report { + + /// If true, result is appended to possibly existing file + bool append; + + /// Path to a file containing results + std::string output_path; + + /// Path to a file containing junit xml results + std::string junit_output_path; + + /// Sequence of tags to attach to each result + std::vector> pivot_tags; + + /// If true, reports status of all kernels including those that were + /// not run for the given arguments + bool report_not_run; + + /// Prints human-readable text to stdout. If false, nothing is written to stdout + bool verbose; + + /// Sort results by (currently by flops-per-byte) + bool sort_results; + + /// Prints the name of the kernel being profiled before running the kernel. + /// This is useful for determining which kernel is causing a run of the profiler to hang + bool print_kernel_before_running; + + // + // Methods + // + + Report(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + }; + + /// Options related to printing usage and version information + struct About { + + /// If true, usage is printed and the program ends. 
+ bool help; + + /// Prints version string + bool version; + + /// Print information about devices + bool device_info; + + // + // Methods + // + + About(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out, int indent = 0) const; + + static void print_version(std::ostream &out); + }; + +public: + + // + // Data members + // + + /// Top-level execution mode + ExecutionMode execution_mode; + + /// Name of math function to profile + library::OperationKind operation_kind; + + /// Vector of operation name substrings + std::vector operation_names; + + /// Vector of operation name substrings + std::vector excluded_operation_names; + + + // + // Detailed configuration options + // + + /// Configuration + CommandLine cmdline; + Device device; + Initialization initialization; + Library library; + Verification verification; + Profiling profiling; + Report report; + About about; + +public: + + Options(CommandLine const &cmdline); + + void print_usage(std::ostream &out) const; + void print_options(std::ostream &out) const; + + static std::string indent_str(int indent); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_report.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_report.h new file mode 100644 index 0000000000000000000000000000000000000000..b74d069a76586740ff9897961809a8e0b5a29044 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_report.h @@ -0,0 +1,127 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Class performing output during profiling +*/ + +#pragma once + +#include +#include + +// CUTLASS Profiler includes +#include "options.h" +#include "enumerated_types.h" +#include "performance_result.h" + +// CUTLASS Library includes +#include "cutlass/library/library.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +class PerformanceReport { +private: + + /// Reference to options + Options const &options_; + + /// Operation kind + library::OperationKind op_kind_; + + /// Operation file name containing performance report of op_kind + std::string op_file_name_; + + /// Output file containing results + std::ofstream output_file_; + + /// Operation file name containing junit performance report of op_kind + std::string op_junit_file_name_; + + /// Output file containing junit results + std::ofstream junit_output_file_; + + /// Flag indicating the performance report is valid + bool good_; + + /// Vector of argument names + std::vector argument_names_; + + /// Counter uniquely identifying problem within the report + size_t problem_index_; + + /// Collection of all results + PerformanceResultVector concatenated_results_; + +public: + + PerformanceReport(Options const &options, std::vector const &argument_names, library::OperationKind const &op_kind); + ~PerformanceReport(); + + bool good() const { return good_; } + + void next_problem(); + void append_result(PerformanceResult result); + void sort_results(PerformanceResultVector &results); + void append_results(PerformanceResultVector const &results); + +public: + + /// Prints the CSV header + std::ostream & print_csv_header_(std::ostream &out); + + /// Prints the CSV + std::ostream & print_result_csv_(std::ostream &out, PerformanceResult const &result); + + /// @defgroup jUnit Result Generation + /// 
Functions related to generation of the jUnit results + /// @{ + + std::ostream & print_junit_header_(std::ostream &out); + std::ostream & print_junit_result_(std::ostream &out, PerformanceResult const &result); + std::ostream & print_junit_footer_(std::ostream &out); + + /// @} + + /// Prints the result in human readable form + std::ostream & print_result_pretty_( + std::ostream &out, + PerformanceResult const &result, + bool use_shell_coloring = true); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_result.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_result.h new file mode 100644 index 0000000000000000000000000000000000000000..c714e02f8ef08e9913258e1048cd0aa88c69c76f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_result.h @@ -0,0 +1,128 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines a math function +*/ + +#pragma once + +#include + +#include "cutlass/cutlass.h" + +// CUTLASS Profiler includes +#include "enumerated_types.h" + +// CUTLASS Library includes +#include "cutlass/library/library.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Performance result object +struct PerformanceResult { + + /// Index of problem + size_t problem_index; + + /// library::Provider + library::Provider provider; + + /// Operation kind + library::OperationKind op_kind; + + /// CUTLASS status result from kernels (success or failure) + // Status does information on verification + Status status; + + /// Outcome of verification (worst case verification result) + Disposition disposition; + + /// Outcome of verification (all verification results) + 
DispositionMap verification_map; + + /// Operation name + std::string operation_name; + + /// Stringified vector of argument values + std::vector > arguments; + + /// Number of bytes read or written + int64_t bytes; + + /// Number of DL flops performed by the math function + int64_t flops; + + /// Average runtime in ms + double runtime; + + // + // Members + // + + /// Ctor + PerformanceResult(): + problem_index(0), + op_kind(library::OperationKind::kInvalid), + provider(library::Provider::kInvalid), + disposition(Disposition::kNotRun), + status(Status::kInvalid), + bytes(0), + flops(0), + runtime(0) + { } + + /// Returns true if the runtime is valid + bool good() const { + return runtime > 0; + } + + /// Math throughput in units of GFLOP/s + double gflops_per_sec() const { + return double(flops) / runtime / 1.0e6; + } + + /// memory bandwidth in units of GiB/s + double gbytes_per_sec() const { + return double(bytes) / double(1 << 30) / runtime * 1000.0; + } + +}; + +using PerformanceResultVector = std::vector; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/problem_space.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/problem_space.h new file mode 100644 index 0000000000000000000000000000000000000000..651444598e882afbffb91d32e1652cb0ff576523 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/problem_space.h @@ -0,0 +1,1014 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief + + "Any sufficiently complicated C or Fortran program contains an ad-hoc, informally-specified, + bug-ridden, slow implementation of half of Common Lisp." 
+ + - Greenspun's Tenth Rule of Programming + + + cutlass::profiler::ProblemSpace defines a set of data structures which represent the Cartesian + product of sequences defined by integer ranges, lists of scalars, and sets of enumerated types. + + These permit a single invocation of the CUTLASS Profiler to iterate over a large set of problems, + verify and profile various operations when they are compatible with the command line, and + construct data tables of results that are convenient inputs to post processing in Excel or Pandas. + + By executing multiple problems per invocation, startup overheads may be amortized across many + kernel launches. +*/ + +#pragma once + +// Standard Library includes +#include +#include +#include +#include +#include + +// CUTLASS Utility includes +#include "cutlass/util/command_line.h" + +// CUTLASS Library includes +#include "cutlass/library/library.h" + +// Profiler includes +#include "enumerated_types.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Defines the argument schema +struct ArgumentDescription { + + /// Type of argument + ArgumentTypeID type; + + /// Prioritized array of aliases used in command line parsing + std::vector aliases; + + /// Description of argument + std::string description; + + // + // Methods + // + + /// Default ctor + ArgumentDescription(): + type(ArgumentTypeID::kInvalid) { } + + /// Constructor with aliases + ArgumentDescription( + ArgumentTypeID type_, + std::vector const &aliases_, + std::string const &description_ + ): + type(type_), aliases(aliases_), description(description_) { } +}; + +/// Vector of arguments +using ArgumentDescriptionVector = std::vector; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Base class for kernel arguments +struct KernelArgument { + + // + // Type definitions + // + + /// Value base class + struct Value 
{ + + KernelArgument const *argument; + bool not_null; + + // + // Methods + // + + Value( + KernelArgument const *argument_ = nullptr, + bool not_null_ = true + ): argument(argument_), not_null(not_null_) { } + + virtual ~Value() { } + + virtual std::ostream &print(std::ostream &out) const =0; + }; + + /// Abstract base class to iterate over values within arguments + struct ValueIterator { + + /// Indicates type of kernel argument + KernelArgument const *argument; + + /// If the iterator points to an argument that is null, it needs to be distinguished + /// from end. + bool null_argument; + + // + // Methods + // + + /// Constructs a value iterator - no methods are valid if argument_ == nullptr + ValueIterator( + KernelArgument const *argument_ = nullptr, + bool null_argument_ = false): + argument(argument_), null_argument(null_argument_) { + + if (!argument_->not_null()) { + null_argument = true; + } + } + + virtual ~ValueIterator() { } + + /// Advances to next point in range + virtual void operator++() = 0; + + /// Compares against another value iterator - must be of the same KernelArgument type + virtual bool operator==(ValueIterator const &it) const = 0; + + /// Returns a unique_ptr object pointing to a newly created value object + virtual std::unique_ptr at() const = 0; + + /// Gets the type of the iterator + ArgumentTypeID type() const { + return argument->description->type; + } + + /// Helper to compute inequality + bool operator!=(ValueIterator const &it) const { + return !(*this == it); + } + + std::ostream &print(std::ostream &out) const; + }; + + // + // Data members + // + + /// Describes the argument + ArgumentDescription const *description; + + /// Parent node + KernelArgument *parent; + + /// Sequence in which the kernel argument is to be iterated over. + /// Smaller means faster changing. 
-1 is don't care + int ordinal; + + // + // Methods + // + + /// Default ctor + KernelArgument( + ArgumentDescription const *description_ = nullptr, + KernelArgument *parent_ = nullptr, + int ordinal_ = -1 + ): description(description_), parent(parent_), ordinal(ordinal_) { } + + virtual ~KernelArgument(); + + /// Returns true if the kernel argument iself is empty + virtual bool not_null() const =0; + + /// Returns a string name for debugging + std::string qualified_name() const { + if (description) { + if (description->aliases.empty()) { + return ""; + } + return description->aliases.front(); + } + return ""; + } + + virtual std::unique_ptr begin() const =0; + virtual std::unique_ptr end() const =0; +}; + +using KernelArgumentVector = std::vector>; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Defines a scalar argument type as a string that is lexically cast to the appropriate kernel +/// type. +struct ScalarArgument : public KernelArgument { + + // + // Type definitions + // + + /// Value type + struct ScalarValue : public KernelArgument::Value { + + std::string value; + + // + // Methods + // + + ScalarValue( + std::string const &value_ = "", + ScalarArgument const *argument = nullptr, + bool not_null_ = true + ); + + virtual std::ostream &print(std::ostream &out) const; + }; + + using ValueCollection = std::vector; + + /// Abstract base class to iterate over values within arguments + struct ScalarValueIterator : public KernelArgument::ValueIterator { + + // + // Data members + // + + ValueCollection::const_iterator value_it; + + // + // Methods + // + + ScalarValueIterator(ScalarArgument const *argument = nullptr); + + virtual void operator++(); + virtual bool operator==(ValueIterator const &it) const; + + /// Gets the value pointed to + virtual std::unique_ptr at() const; + }; + + // + // Data members + // + + /// Set of possible values + ValueCollection values; + + // + // Methods + // + + /// 
Default ctor + ScalarArgument( + ArgumentDescription const *description + ): + KernelArgument(description) { } + + virtual bool not_null() const { + return !values.empty(); + } + + virtual std::unique_ptr begin() const; + virtual std::unique_ptr end() const; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Closed range supporting additive increment +struct Range { + + // + // Type definitions + // + + enum class Mode { + kSequence, + kRandom, + kRandomLog2, + kInvalid + }; + + struct Iterator { + + int64_t value; + int64_t increment; + Range const *range; + + // + // Methods + // + + Iterator( + int64_t value_ = 0, + int64_t increment_ = 1, + Range const *range_ = nullptr + ): + value(value_), increment(increment_), range(range_) { } + + Iterator & operator++() { + value += increment; + return *this; + } + + Iterator operator++(int) { + Iterator self(*this); + ++(*this); + return self; + } + + bool operator==(Iterator const &it) const { + return value == it.value; + } + + bool operator!=(Iterator const &it) const { + return !(*this == it); + } + + static int64_t round(int64_t value, int64_t divisible) { + int64_t rem = (value % divisible); + + // Round either up or down + if (rem > divisible / 2) { + value += (divisible - rem); + } + else { + value -= rem; + } + + return value; + } + + int64_t at() const { + if (!range) { + return value; + } + + switch (range->mode) { + case Mode::kSequence: return value; + + case Mode::kRandom: { + double rnd = double(range->minimum) + + double(std::rand()) / double(RAND_MAX) * (double(range->maximum) - double(range->minimum)); + + int64_t value = int64_t(rnd); + + return round(value, range->divisible); + } + break; + + case Mode::kRandomLog2: { + double lg2_minimum = std::log(double(range->minimum)) / std::log(2.0); + double lg2_maximum = std::log(double(range->maximum)) / std::log(2.0); + double rnd = lg2_minimum + double(std::rand()) / double(RAND_MAX) * 
(lg2_maximum - lg2_minimum); + + int64_t value = int64_t(std::pow(2.0, rnd)); + + return round(value, range->divisible); + } + break; + default: break; + } + return value; + } + + int64_t operator*() const { + return at(); + } + }; + + // + // Data members + // + + int64_t first; ///< first element in range + int64_t last; ///< last element in range + int64_t increment; ///< additive increment between values + + Mode mode; ///< mode selection enables alternative values + int64_t minimum; ///< minimum value to return + int64_t maximum; ///< maximum value to return + int64_t divisible; ///< rounds value down to an integer multiple of this value + + // + // Methods + // + + /// Default constructor - range acts as a scalar + Range(int64_t first_ = 0): first(first_), last(first_), increment(1), mode(Mode::kSequence), minimum(0), maximum(0), divisible(1) { } + + /// Range acts as a range + Range( + int64_t first_, + int64_t last_, + int64_t increment_ = 1, + Mode mode_ = Mode::kSequence, + int64_t minimum_ = 0, + int64_t maximum_ = 0, + int64_t divisible_ = 1 + ): first(first_), last(last_), increment(increment_), mode(mode_), minimum(minimum_), maximum(maximum_), divisible(divisible_) { + + // Helpers to avoid constructing invalid ranges + if (increment > 0) { + if (last < first) { + std::swap(last, first); + } + } + else if (increment < 0) { + if (first < last) { + std::swap(last, first); + } + } + else if (last != first) { + last = first; + increment = 1; + } + } + + /// Helper to construct a sequence range + static Range Sequence(int64_t first_, int64_t last_, int64_t increment_ = 1) { + return Range(first_, last_, increment_, Mode::kSequence); + } + + /// Helper to construct a range that is a random distribution + static Range Random(int64_t minimum_, int64_t maximum_, int64_t count_, int64_t divisible_ = 1) { + return Range(1, count_, 1, Mode::kRandom, minimum_, maximum_, divisible_); + } + + /// Helper to construct a range that is a random distribution over a log 
scale + static Range RandomLog2(int64_t minimum_, int64_t maximum_, int64_t count_, int64_t divisible_ = 1) { + return Range(1, count_, 1, Mode::kRandomLog2, minimum_, maximum_, divisible_); + } + + /// Returns an iterator to the first element within the range + Iterator begin() const { + return Iterator(first, increment, this); + } + + /// Returns an iterator to the first element *after* the range + Iterator end() const { + return Iterator(first + ((last - first)/increment + 1) * increment, increment, this); + } +}; + +/// Integer-valued argument - represented as a list of integer-valued ranges +struct IntegerArgument : public KernelArgument { + + // + // Type definitions + // + + /// Value type + struct IntegerValue : public KernelArgument::Value { + + int64_t value; + + // + // Methods + // + + IntegerValue( + int64_t value_ = 0, + IntegerArgument const *argument_ = nullptr, + bool not_null_ = true + ); + + /// Pretty printer for debugging + virtual std::ostream &print(std::ostream &out) const; + }; + + /// Collection of ranges represent the IntegerArgument's state + using RangeCollection = std::vector; + + /// Abstract base class to iterate over values within arguments + struct IntegerValueIterator : public KernelArgument::ValueIterator { + + // + // Data members + // + + RangeCollection::const_iterator range_it; + Range::Iterator value_it; + + // + // Methods + // + + IntegerValueIterator(); + IntegerValueIterator(IntegerArgument const *argument); + + virtual void operator++(); + virtual bool operator==(ValueIterator const &it) const; + + /// Gets the value pointed to + virtual std::unique_ptr at() const; + }; + + // + // Data members + // + + /// Set of possible values + RangeCollection ranges; + + // + // Methods + // + + /// Default ctor + IntegerArgument( + ArgumentDescription const *description + ): + KernelArgument(description) { } + + virtual bool not_null() const { + bool _not_null = !ranges.empty(); + return _not_null; + } + + virtual std::unique_ptr 
begin() const; + virtual std::unique_ptr end() const; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure defining the data type of tensors +struct TensorArgument : public KernelArgument { + + // + // Type definitions + // + + struct TensorDescription { + + /// Data type of elements + library::NumericTypeID element; + + /// Layout definition + library::LayoutTypeID layout; + + /// Computed extent + std::vector extent; + + /// Enables directly specifying stride value used to size tensor + std::vector stride; + + // + // Methods + // + + TensorDescription( + library::NumericTypeID element_ = library::NumericTypeID::kUnknown, + library::LayoutTypeID layout_ = library::LayoutTypeID::kUnknown, + std::vector extent_ = std::vector(), + std::vector stride_ = std::vector() + ): + element(element_), layout(layout_), extent(extent_), stride(stride_) {} + }; + + using ValueCollection = std::vector; + + /// Value structure + struct TensorValue : public KernelArgument::Value { + + TensorDescription desc; + + // + // Methods + // + + TensorValue( + TensorDescription const &desc_ = TensorDescription(), + TensorArgument const *argument_ = nullptr, + bool not_null_ = true + ); + + /// Pretty printer for debugging + virtual std::ostream &print(std::ostream &out) const; + }; + + /// Abstract base class to iterate over values within arguments + struct TensorValueIterator : public KernelArgument::ValueIterator { + + // + // Data members + // + + ValueCollection::const_iterator value_it; + + // + // Methods + // + + TensorValueIterator(TensorArgument const *argument_); + + virtual void operator++(); + virtual bool operator==(ValueIterator const &it) const; + + /// Gets the value pointed to + virtual std::unique_ptr at() const; + }; + + /// Set of possible values + ValueCollection values; + + // + // Methods + // + + /// Default ctor + TensorArgument( + ArgumentDescription const *description + ): + 
KernelArgument(description) { } + + virtual bool not_null() const { + return !values.empty(); + } + + virtual std::unique_ptr begin() const; + virtual std::unique_ptr end() const; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Numeric data type +struct EnumeratedTypeArgument : public KernelArgument { + + // + // Type definitions + // + + struct EnumeratedTypeValue : public KernelArgument::Value { + + /// Data type of element + std::string element; + + // + // Methods + // + + EnumeratedTypeValue( + std::string const &element_ = std::string(), + EnumeratedTypeArgument const *argument_ = nullptr, + bool not_null_ = true + ); + + /// Pretty printer for debugging + virtual std::ostream &print(std::ostream &out) const; + }; + + using ValueCollection = std::vector; + + /// Abstract base class to iterate over values within arguments + struct EnumeratedTypeValueIterator : public KernelArgument::ValueIterator { + + // + // Data members + // + + ValueCollection::const_iterator value_it; + + // + // Methods + // + + EnumeratedTypeValueIterator(EnumeratedTypeArgument const *argument_ = nullptr); + + virtual void operator++(); + virtual bool operator==(ValueIterator const &it) const; + + /// Gets the value pointed to + virtual std::unique_ptr at() const; + }; + + // + // Data members + // + + ValueCollection values; + + // + // Members + // + + /// Default ctor + EnumeratedTypeArgument(ArgumentDescription const *description): + KernelArgument(description) {} + + virtual bool not_null() const { + return !values.empty(); + } + + virtual std::unique_ptr begin() const; + virtual std::unique_ptr end() const; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Object storing the space argument values +class ProblemSpace { +public: + + /// Tuple of arguments + using Problem = std::vector>; + + /// Type used to iterator over things + using IteratorVector = 
std::vector>; + + /// Iterates over points in the design space + class Iterator { + private: + + /// One iterator per argument + IteratorVector iterators; + + public: + + // + // Methods + // + + explicit Iterator(); + Iterator(ProblemSpace const &problem_space); + Iterator(Iterator &&it); + + // Rule of three + Iterator(Iterator const &) = delete; + Iterator &operator=(Iterator const &it) = delete; + ~Iterator() = default; + + /// Pre-increment - advances to next point in argument range + void operator++(); + + /// Gets the current argument value + Problem at() const; + + /// Moves iterator to end + void move_to_end(); + + /// Equality operator + bool operator==(Iterator const &it) const; + + /// Inequality operator + bool operator!=(Iterator const &it) const { + return !(*this == it); + } + + /// Helper to call at() method + Problem operator*() const { + return at(); + } + + /// Helper to print iterator state + std::ostream & print(std::ostream &out) const; + + private: + + /// Helper for recursively constructing iterators + void construct_(KernelArgument const *argument); + }; + +public: + + // + // Data members + // + + KernelArgumentVector arguments; + + /// Map of argument names to their position within the argument vector + std::unordered_map argument_index_map; + +public: + + // + // Methods + // + + /// Default ctor + ProblemSpace() {} + + /// Constructs a problem space from a vector of arguments. This vector must outlive + /// the ProblemSpace object, which stores pointers to objects within the + /// ArgumentDescriptionVector. 
+ ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline); + + Iterator begin() const; // returns an iterator to the first point in the range + Iterator end() const; // returns an iterator to the first point after the range + + /// Returns the index of an argument by name + size_t argument_index(char const *name) const; + + /// Gets all argument names as an ordered vector + std::vector argument_names() const; + + /// Returns the number of dimensions of the problem space + size_t rank() const { return arguments.size(); } + +private: + + /// Helper for recursively cloning + void clone_( + KernelArgumentVector &kernel_args, + ArgumentDescription const *arg_desc); + + /// Parses command line argument + void parse_( + KernelArgument *arg, + CommandLine const &cmdline); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexically casts an argument to an int if it is defined. Returns true if not null. +bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_int( + int &int_value, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_int( + int64_t &int_value, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_NumericTypeID(library::NumericTypeID &numeric_type, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_NumericTypeID( + library::NumericTypeID &numeric_type, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_LayoutTypeID(library::LayoutTypeID &layout_type, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_LayoutTypeID( + library::LayoutTypeID &layout_type, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_OpcodeClassID(library::OpcodeClassID &opcode_class, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_OpcodeClassID( + library::OpcodeClassID &opcode_class, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_SplitKModeID(library::SplitKMode &split_k_mode, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_SplitKModeID( + library::SplitKMode &split_k_mode, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_ConvModeID(library::ConvModeID &conv_mode, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_ConvModeID( + library::ConvModeID &conv_mode, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_IteratorAlgorithmID(library::IteratorAlgorithmID &iterator_algorithm, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_IteratorAlgorithmID( + library::IteratorAlgorithmID &iterator_algorithm, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_RasterOrder(library::RasterOrder &raster_order, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_RasterOrder( + library::RasterOrder &raster_order, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_ProviderID(library::Provider &provider, KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_ProviderID( + library::Provider &provider, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Lexically casts an argument to a given type stored in a byte array. Returns true if not null. +bool arg_as_scalar( + std::vector &bytes, + library::NumericTypeID numeric_type, + KernelArgument::Value const *value_ptr); + +/// Lexically casts an argument to a given type stored in a byte array. Returns true if not null. 
+bool arg_as_scalar( + std::vector &bytes, + library::NumericTypeID numeric_type, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Returns true if a tensor description satisfies a `tensor` value +bool tensor_description_satisfies( + library::TensorDescription const &tensor_desc, + TensorArgument::TensorValue const *value_ptr); + +/// Returns true if a tensor description satisfies a `tensor` value +bool tensor_description_satisfies( + library::TensorDescription const &tensor_desc, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + +/// Returns true if a conv kind satisfies the value +bool conv_kind_satisfies( + library::ConvKind const &conv_kind, + EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr); + +/// Returns true if a conv kind satisfies the value +bool conv_kind_satisfies( + library::ConvKind const &conv_kind, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +/// Returns true if a iterator algorithm satisfies the value +bool iterator_algorithm_satisfies( + library::IteratorAlgorithmID const &iterator_algorithm, + EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr); + +/// Returns true if a iterator algorithm satisfies the value +bool iterator_algorithm_satisfies( + library::IteratorAlgorithmID const &iterator_algorithm, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_2k_operation_profiler.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_2k_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..6dbfc3fbf7f589a3cd567c1cd08109fa11a311eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_2k_operation_profiler.h @@ -0,0 +1,229 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines a math function + + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/blas3.h" +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +/// Abstract base class for each math function +class Rank2KOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct RankKProblem { + int64_t n; + int64_t k; + int64_t lda; + int64_t ldb; + int64_t ldc; + FillMode fill_mode; + BlasMode blas_mode; + std::vector alpha; + std::vector beta; + int64_t split_k_slices; + int64_t batch_count; + + // + // Methods + // + + RankKProblem(): + n(16), k(16), lda(0), ldc(0), + fill_mode(FillMode::kInvalid), blas_mode(BlasMode::kInvalid), + split_k_slices(1), batch_count(1) { } + + /// Parses the problem + Status parse( + 
library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Total number of bytes loaded + int64_t bytes(library::RankKDescription const &operation_desc) const; + + /// Total number of flops computed + int64_t flops(library::RankKDescription const &operation_desc) const; + + /// Initializes a performance result + void initialize_result( + PerformanceResult &result, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space); + }; + + /// Workspace used + struct RankKWorkspace { + + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *C; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + library::RankKConfiguration configuration; + library::RankKArguments arguments; + + /// Buffer used for the operation's host workspace + std::vector host_workspace; + + /// Buffer used for the operations' device workspace + DeviceAllocation device_workspace; + + // + // Methods + // + + RankKWorkspace(): + A(nullptr), B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { } + }; + +protected: + + // + // Data members + // + + /// GEMM problem obtained from problem space + RankKProblem problem_; + + /// Device memory allocations + RankKWorkspace rank_k_workspace_; + + +public: + // + // Methods + // + + /// Ctor + Rank2KOperationProfiler(Options const &options); + + /// Destructor + virtual ~Rank2KOperationProfiler(); + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status 
initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against references + bool verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_k_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_k_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..779509a84359b6eb058d3c1a128cb63b6d920ba0 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_k_operation_profiler.h @@ -0,0 +1,227 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines a math function + + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/blas3.h" +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +/// Abstract base class for each math function +class RankKOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct RankKProblem { + int64_t n; + int64_t k; + int64_t lda; + int64_t ldc; + FillMode fill_mode; + BlasMode blas_mode; + std::vector alpha; + std::vector beta; + int64_t split_k_slices; + int64_t batch_count; + + // + // Methods + // + + RankKProblem(): + n(16), k(16), lda(0), ldc(0), + fill_mode(FillMode::kInvalid), blas_mode(BlasMode::kInvalid), + split_k_slices(1), batch_count(1) { } + + /// Parses the problem + Status parse( + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Total number of bytes loaded + int64_t bytes(library::RankKDescription const &operation_desc) const; + + /// Total number of flops computed + int64_t flops(library::RankKDescription const &operation_desc) const; + + /// Initializes a performance result + void initialize_result( + PerformanceResult &result, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space); + }; + + /// Workspace used + struct 
RankKWorkspace { + + DeviceAllocation *A; + DeviceAllocation *C; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + library::RankKConfiguration configuration; + library::RankKArguments arguments; + + /// Buffer used for the operation's host workspace + std::vector host_workspace; + + /// Buffer used for the operations' device workspace + DeviceAllocation device_workspace; + + // + // Methods + // + + RankKWorkspace(): + A(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { } + }; + +protected: + + // + // Data members + // + + /// GEMM problem obtained from problem space + RankKProblem problem_; + + /// Device memory allocations + RankKWorkspace rank_k_workspace_; + + +public: + // + // Methods + // + + /// Ctor + RankKOperationProfiler(Options const &options); + + /// Destructor + virtual ~RankKOperationProfiler(); + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + 
DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against references + bool verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/reduction_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/reduction_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..eef73507b51ede03d648e9a1298cde9a0b46a6d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/reduction_operation_profiler.h @@ -0,0 +1,173 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines profiling functionality for reduction operation + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" +#if CUTLASS_ENABLE_CUDNN +#include "cudnn_helpers.h" +#endif //#if CUTLASS_ENABLE_CUDNN +#include "debug.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class ReductionOperationProfiler : public OperationProfiler { +public: + + + /// Workspace used + struct ReductionWorkspace { + + /// Conv device allocations + DeviceAllocation *Workspace; + DeviceAllocation *Source; + DeviceAllocation *Destination; + DeviceAllocation *Reference; + + /// Library configuration and arguments + library::ReductionConfiguration configuration; + library::ReductionArguments arguments; + + /// Buffer used for the cutlass operations' host workspace + std::vector host_workspace; + + /// Buffer used for the cutlass operations' device workspace + DeviceAllocation device_workspace; + + // + // Methods + // + + ReductionWorkspace(): + Workspace(nullptr), Source(nullptr), Destination(nullptr), Reference(nullptr) { } + }; + +protected: + + // + // Data members + // + + /// Reduction problem obtained from problem space + MatrixCoord problem_; + + /// Device memory allocations + ReductionWorkspace conv_workspace_; + + +public: + // + // Methods + // + + /// Ctor + 
ReductionOperationProfiler(Options const &options); + + /// Destructor + virtual ~ReductionOperationProfiler(); + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/sparse_gemm_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/sparse_gemm_operation_profiler.h new file mode 100644 index 
0000000000000000000000000000000000000000..c1f11c98b5350ed6dd798ebc682cb7bf0634d3a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/sparse_gemm_operation_profiler.h @@ -0,0 +1,214 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" +#include "gemm_operation_profiler.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class SparseGemmOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct SparseGemmProblem { + int64_t m; + int64_t n; + int64_t k; + int64_t lda; + int64_t ldb; + int64_t ldc; + int64_t lde; + std::vector alpha; + std::vector beta; + int64_t split_k_slices; + int64_t batch_count; + static int const sparse = 2; + // every 128b ElementA uses one elementE + int elements_per_128b; + + // + // Methods + // + + SparseGemmProblem(): + m(16), n(16), k(16), lda(0), ldb(0), ldc(0), lde(0), split_k_slices(1), batch_count(1) { } + + /// Parses the problem 
+ Status parse( + library::SparseGemmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes a performance result + void initialize_result( + PerformanceResult &result, + library::SparseGemmDescription const &operation_desc, + ProblemSpace const &problem_space); + }; + + /// Workspace used + struct SparseGemmWorkspace { + + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *C; + DeviceAllocation *E; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + library::SparseGemmConfiguration configuration; + library::SparseGemmArguments arguments; + + /// Buffer used for the operation's host workspace + std::vector host_workspace; + + /// Buffer used for the operations' device workspace + DeviceAllocation device_workspace; + + // + // Methods + // + + SparseGemmWorkspace(): + A(nullptr), B(nullptr), C(nullptr), E(nullptr), Computed(nullptr), Reference(nullptr) { } + }; + +protected: + + // + // Data members + // + + // GEMM problem + SparseGemmProblem problem_; + + /// Device memory allocations + SparseGemmWorkspace gemm_workspace_; + + +public: + // + // Methods + // + + /// Ctor + SparseGemmOperationProfiler(Options const &options); + + /// Destructor + virtual ~SparseGemmOperationProfiler(); + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const 
&problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::SparseGemmDescription const &operation_desc, + ProblemSpace const &problem_space); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/symm_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/symm_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..a0162b4a2fba385841bdebaad1d952aac757c7a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/symm_operation_profiler.h @@ -0,0 +1,230 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines a math function + + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/blas3.h" +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +/// Abstract base class for each math function +class SymmOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct SymmProblem { + int64_t m; + int64_t n; + int64_t lda; + int64_t ldb; + int64_t ldc; + SideMode side_mode; + FillMode fill_mode; + BlasMode blas_mode; + std::vector alpha; + std::vector beta; + int64_t split_k_slices; + int64_t batch_count; + + // + // Methods + // + + SymmProblem(): + m(16), n(16), lda(0), ldb(0), ldc(0), + side_mode(SideMode::kInvalid), fill_mode(FillMode::kInvalid), blas_mode(BlasMode::kInvalid), + split_k_slices(1), batch_count(1) { } + + /// Parses the problem + Status parse( + library::SymmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Total number of bytes loaded + int64_t bytes(library::SymmDescription const &operation_desc) const; + + /// Total number of flops computed + int64_t flops(library::SymmDescription const &operation_desc) const; + + /// Initializes a performance result + void initialize_result( + PerformanceResult &result, + library::SymmDescription const &operation_desc, + 
ProblemSpace const &problem_space); + }; + + /// Workspace used + struct SymmWorkspace { + + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *C; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + library::SymmConfiguration configuration; + library::SymmArguments arguments; + + /// Buffer used for the operation's host workspace + std::vector host_workspace; + + /// Buffer used for the operations' device workspace + DeviceAllocation device_workspace; + + // + // Methods + // + + SymmWorkspace(): + A(nullptr), B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { } + }; + +protected: + + // + // Data members + // + + /// GEMM problem obtained from problem space + SymmProblem problem_; + + /// Device memory allocations + SymmWorkspace symm_workspace_; + + +public: + // + // Methods + // + + /// Ctor + SymmOperationProfiler(Options const &options); + + /// Destructor + virtual ~SymmOperationProfiler(); + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures 
performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Initializes the performance result + void initialize_result_( + PerformanceResult &result, + Options const &options, + library::SymmDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against references + bool verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/trmm_operation_profiler.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/trmm_operation_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..32ebcdad923f4843accf455e1466bbf8658abcc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/trmm_operation_profiler.h @@ -0,0 +1,222 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines a math function + + +*/ + +#pragma once + +#include +#include +#include +#include +#include + +// CUTLASS Library includes +#include "cutlass/blas3.h" +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" +#include "cutlass/library/manifest.h" + +// Profiler includes +#include "options.h" +#include "device_context.h" +#include "operation_profiler.h" +#include "performance_result.h" +#include "problem_space.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Abstract base class for each math function +class TrmmOperationProfiler : public OperationProfiler { +public: + + /// Problem structure obtained from problem space + struct TrmmProblem { + int64_t m; + int64_t n; + int64_t lda; + int64_t ldb; + int64_t ldd; + SideMode side_mode; + FillMode fill_mode; + DiagType diag_type; + std::vector alpha; + std::vector beta; + int64_t split_k_slices; + int64_t batch_count; + + // + // Methods + // + + TrmmProblem(): + m(16), n(16), lda(0), ldb(0), ldd(0), split_k_slices(1), batch_count(1) { } + + /// Parses the problem + Status parse( + library::TrmmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes a performance result + void initialize_result( + PerformanceResult &result, + library::TrmmDescription const &operation_desc, + ProblemSpace const &problem_space); + }; + + /// Workspace used + struct TrmmWorkspace { + + DeviceAllocation *A; + DeviceAllocation *B; + DeviceAllocation *D; + DeviceAllocation *Computed; + DeviceAllocation *Reference; + + library::TrmmConfiguration configuration; + library::TrmmArguments arguments; + + /// Buffer used 
for the operation's host workspace + std::vector host_workspace; + + /// Buffer used for the operations' device workspace + DeviceAllocation device_workspace; + + // + // Methods + // + + TrmmWorkspace(): + A(nullptr), B(nullptr), D(nullptr), Computed(nullptr), Reference(nullptr) { } + }; + +protected: + + // + // Data members + // + + /// GEMM problem obtained from problem space + TrmmProblem problem_; + + /// Device memory allocations + TrmmWorkspace trmm_workspace_; + + +public: + // + // Methods + // + + /// Ctor + TrmmOperationProfiler(Options const &options); + + /// Destructor + virtual ~TrmmOperationProfiler(); + + /// Prints usage statement for the math function + virtual void print_usage(std::ostream &out) const; + + /// Prints examples + virtual void print_examples(std::ostream &out) const; + + /// Extracts the problem dimensions + virtual Status initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Initializes workspace + virtual Status initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Verifies CUTLASS against references + virtual bool verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + + /// Measures performance results + virtual bool profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +protected: + + /// Initializes the performance result + void 
initialize_result_( + PerformanceResult &result, + Options const &options, + library::TrmmDescription const &operation_desc, + ProblemSpace const &problem_space); + + /// Verifies CUTLASS against references + bool verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem); + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/conv2d_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/conv2d_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..f231510e70c8ad4c01cbb88b1779fc7756e27636 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/conv2d_operation_profiler.cu @@ -0,0 +1,1491 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Convolution 2D profiling +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/conv2d_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" +///////////////////////////////////////////////////////////////////////////////////////////////// +using namespace cutlass::library; + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kConv2d, + { + {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, + {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"h", 
"input_h"}, "Input H dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"}, + {ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"}, + {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"}, + {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, + {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, + {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, + {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, + {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, + {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, + {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, + {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, + {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, + {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, 
parallel)"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, + }, + { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } + ) { + + description_ = " Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)"; + +} + +/// Destructor +Conv2dOperationProfiler::~Conv2dOperationProfiler() { + +} + + +/// Prints usage statement for the math function +void Conv2dOperationProfiler::print_usage(std::ostream &out) const { + out << "Conv2d" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void Conv2dOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular convolution (specify all the convolution parameters):\n" + << " $ cutlass_profiler --operation=Conv2d" + " --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32" + " --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3" + " --pad_h=1 --pad_w=1" + " --stride_h=1 --stride_w=1" + " --dilation_h=1 --dilation_w=1\n\n"; +} + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Total number of bytes loaded +int64_t Conv2dOperationProfiler::Conv2dProblem::bytes( + library::ConvDescription const &operation_desc) const { + + cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); + + // Input bytes read and Output bytes written for the gemm problem + int64_t 
bytes_ = + int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); + } + + return bytes_; +} + +/// Total number of flops computed +int64_t Conv2dOperationProfiler::Conv2dProblem::flops( + library::ConvDescription const &operation_desc) const { + + cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); + + int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; + int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; + + // Adjust mainloop flop for dgrad strided + if (operation_desc.conv_kind == library::ConvKind::kDgrad) { + flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w); + } + int64_t flops_total_ = flops_mainloop_ + flops_epilogue_; + + //complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + flops_total_ *=4; + break; + + default: break; + } + + return flops_total_; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status Conv2dOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::ConvDescription const &operation_desc = + static_cast(operation->description()); + + if (!arg_as_int(problem_.n, "n", 
problem_space, problem)) { + // default value + problem_.n = 1; + } + + if (!arg_as_int(problem_.h, "h", problem_space, problem)) { + // default value + problem_.h = 16; + } + + if (!arg_as_int(problem_.w, "w", problem_space, problem)) { + // default value + problem_.w = 16; + } + + if (!arg_as_int(problem_.c, "c", problem_space, problem)) { + // default value + problem_.c = 64; + } + + if (!arg_as_int(problem_.k, "k", problem_space, problem)) { + // default value + problem_.k = 64; + } + + if (!arg_as_int(problem_.r, "r", problem_space, problem)) { + // default value + problem_.r = 3; + } + + if (!arg_as_int(problem_.s, "s", problem_space, problem)) { + // default value + problem_.s = 3; + } + + if (!arg_as_int(problem_.groups, "g", problem_space, problem)) { + // default value + problem_.groups = 1; + } + + if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { + // default value + problem_.pad_h = 1; + } + + if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { + // default value + problem_.pad_w = 1; + } + + if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { + // default value + problem_.stride_h = 1; + } + + if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { + // default value + problem_.stride_w = 1; + } + + if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { + // default value + problem_.dilation_h = 1; + } + + if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { + // default value + problem_.dilation_w = 1; + } + + //////////////////////// Convolution output dimensions p and q //////////////////////// + // Cutlass convolutions support arbitrary output sizes and not constrained by // + // input, filter, padding, striding, dilation sizes. 
// + // cuDNN sets the output dimensions (p, q) using following equations: // + // // + // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // + // where; div_up(a, b) : (a - 1)/b + 1 // + // // + // Thus, when output p and q dimensions are unspecified by the user // + // cutlass profiler sets p and q which are cuDNN compliant. // + // // + //////////////////////////////////////////////////////////////////////////////////////// + // set convolution output p + if (!arg_as_int(problem_.p, "p", problem_space, problem)) { + // default value (set using cudnn formula for output height, when p is not provided) + problem_.p = ( + problem_.h + + 2 * problem_.pad_h - + ((problem_.r - 1) * problem_.dilation_h + 1) + ) / (problem_.stride_h) + + 1; + } + + // set convolution output q + if (!arg_as_int(problem_.q, "q", problem_space, problem)) { + // default value (set using cudnn formula for output width, when q is not provided) + problem_.q = ( + problem_.w + + 2 * problem_.pad_w - + ((problem_.s - 1) * problem_.dilation_w + 1) + ) / (problem_.stride_w) + + 1; + } + ///////////////////////////////////////////////////////////////////////////////////////// + + + if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { + // default value + problem_.split_k_mode = library::SplitKMode::kSerial; + } + + if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + problem_.split_k_slices = 1; + } + + if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { + // default value + problem_.conv_mode = library::ConvModeID::kCrossCorrelation; + } + + if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { + // default value + problem_.eq_gemm_provider = library::Provider::kNone; + } + + if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } 
+ + if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + problem_.alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + problem_.beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + // initialize library::Conv2dConfiguration + conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize( + int(problem_.n), + int(problem_.h), + int(problem_.w), + int(problem_.c), + int(problem_.k), + int(problem_.r), + int(problem_.s), + int(problem_.p), + int(problem_.q), + int(problem_.pad_h), + int(problem_.pad_w), + int(problem_.stride_h), + int(problem_.stride_w), + int(problem_.dilation_h), + int(problem_.dilation_w), + static_cast(static_cast(problem_.conv_mode)), + int(problem_.split_k_slices), + int(problem_.groups) + ); + + conv_workspace_.configuration.split_k_mode = static_cast(static_cast(problem_.split_k_mode)); + + conv_workspace_.set_stride_vector( + problem_, operation_desc.conv_kind, operation_desc.A.layout, + operation_desc.B.layout, operation_desc.C.layout); + + // initialize library::ConvArguments + conv_workspace_.arguments.A = nullptr; + conv_workspace_.arguments.B = 
nullptr; + conv_workspace_.arguments.C = nullptr; + conv_workspace_.arguments.D = nullptr; + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // initialize reduction operation for parallel splitKMode + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { + return Status::kErrorInternal; + } + } + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); +} + +/// Initializes the performance result +void Conv2dOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::ConvDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "Activation", problem_space, + std::string(library::to_string(operation_desc.activation().element)) + + ":" + library::to_string(operation_desc.activation().layout)); + + set_argument(result, "Filter", problem_space, + std::string(library::to_string(operation_desc.filter().element)) + + ":" + library::to_string(operation_desc.filter().layout)); + + set_argument(result, "Output", problem_space, + std::string(library::to_string(operation_desc.output().element)) + + ":" + library::to_string(operation_desc.output().layout)); + + set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); + + set_argument(result, "iterator_algorithm", problem_space, 
std::string(library::to_string(operation_desc.iterator_algorithm))); + + set_argument(result, "n", problem_space, problem_.n); + set_argument(result, "h", problem_space, problem_.h); + set_argument(result, "w", problem_space, problem_.w); + set_argument(result, "c", problem_space, problem_.c); + + set_argument(result, "k", problem_space, problem_.k); + set_argument(result, "r", problem_space, problem_.r); + set_argument(result, "s", problem_space, problem_.s); + + set_argument(result, "p", problem_space, problem_.p); + set_argument(result, "q", problem_space, problem_.q); + + set_argument(result, "g", problem_space, problem_.groups); + + set_argument(result, "pad_h", problem_space, problem_.pad_h); + set_argument(result, "pad_w", problem_space, problem_.pad_w); + + set_argument(result, "stride_h", problem_space, problem_.stride_h); + set_argument(result, "stride_w", problem_space, problem_.stride_w); + + set_argument(result, "dilation_h", problem_space, problem_.dilation_h); + set_argument(result, "dilation_w", problem_space, problem_.dilation_w); + + set_argument(result, "split_k_mode", problem_space, + std::string(library::to_string(problem_.split_k_mode))); + set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); + + set_argument(result, "conv_mode", problem_space, + std::string(library::to_string(problem_.conv_mode))); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); + + set_argument(result, "eq_gemm_provider", problem_space, + std::string(library::to_string(problem_.eq_gemm_provider))); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + // Bytes of activation, filter, and output tensors + int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) * + 
conv_workspace_.configuration.problem_size.activation_size(); + + int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) * + conv_workspace_.configuration.problem_size.filter_size(); + + int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) * + conv_workspace_.configuration.problem_size.output_size(); + + // Bytes of activation, filter, and output tensors + result.bytes = problem_.bytes(operation_desc); + + // Theoretical flops required for the computation + result.flops = problem_.flops(operation_desc); + + // Measured runtime + result.runtime = 0; + +} + +/// Initialize reduction problem dimensions and library::Operation +bool Conv2dOperationProfiler::initialize_reduction_configuration_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::ConvDescription const &conv_desc = + static_cast(operation->description()); + + library::ConvKind const &conv_kind = conv_desc.conv_kind; + + if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { + return false; + } + + if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { + return false; + } + + /// This chooses the appropriate stride element of the row-major C tensor. + int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 
2 : 0); + + /// initialize library::ReductionConfiguration + conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); + conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); + conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); + conv_workspace_.reduction_configuration.ldw = + conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; + conv_workspace_.reduction_configuration.lds = + conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; + conv_workspace_.reduction_configuration.ldd = + conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; + + // find reduction operation + library::ReductionFunctionalKey reduction_key( + library::Provider::kCUTLASS, + conv_desc.tile_description.math_instruction.element_accumulator, // element workspace + conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator + conv_desc.C.element, // element output + conv_desc.element_epilogue // element compute + ); + +#if 0// debug print to check which reduction instance is selected + std::cout << reduction_key << "\n"; +#endif + auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); + + if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { + + return false; + } + + // initialize reduction operation required for parallel split-k conv2d operator + reduction_op_ = reduction_it->second; + + // reduction operation found and initialized + return true; +} + + +/// Initializes workspace +Status Conv2dOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + // initialize conv2d underlying operation to handle parallel reduction + library::Operation const* underlying_operation = 
operation; + + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { + return Status::kErrorNotSupported; + } + } + + library::ConvDescription const &operation_desc = + static_cast(underlying_operation->description()); + + // Compute the number of copies of the problem to avoid L2 camping. + if (!options.profiling.workspace_count) { + int64_t bytes = problem_.bytes(operation_desc); + if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { + conv_workspace_.problem_count = + 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); + } + else { + conv_workspace_.problem_count = 1; + } + } + else { + conv_workspace_.problem_count = options.profiling.workspace_count; + } + + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 0; + conv_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + problem_.extent_a(operation_desc.conv_kind), + conv_workspace_.configuration.stride_a, + conv_workspace_.problem_count, + seed_shift++ + ); + + conv_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + problem_.extent_b(operation_desc.conv_kind), + conv_workspace_.configuration.stride_b, + conv_workspace_.problem_count, + seed_shift++ + ); + + if(problem_.groups == problem_.c && problem_.groups == problem_.k){ + // Depthwise direct conv kernel needs reorder the filter. 
+ conv_workspace_.reordered_B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + problem_.extent_b(operation_desc.conv_kind), + conv_workspace_.configuration.stride_b, + conv_workspace_.problem_count, + seed_shift++ + ); + } + + conv_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + problem_.extent_c(operation_desc.conv_kind), + conv_workspace_.configuration.stride_c, + conv_workspace_.problem_count, + seed_shift++ + ); + + conv_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + problem_.extent_c(operation_desc.conv_kind), + conv_workspace_.configuration.stride_c, + conv_workspace_.problem_count + ); + + conv_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + problem_.extent_c(operation_desc.conv_kind), + conv_workspace_.configuration.stride_c, + conv_workspace_.problem_count + ); + } + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); + conv_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); + conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = underlying_operation->initialize( + &conv_workspace_.configuration, + conv_workspace_.host_workspace.data(), + conv_workspace_.device_workspace.data()); + + if (status != Status::kSuccess) { + return status; + } + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + workspace_size = 
reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); + conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); + + status = reduction_op_->initialize( + &conv_workspace_.reduction_configuration, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + + if (status != Status::kSuccess) { + return status; + } + } + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kConv2d; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool Conv2dOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + cudaError_t result; + + // Initialize structure containing Conv2d arguments + conv_workspace_.arguments.A = conv_workspace_.A->data(); + conv_workspace_.arguments.B = conv_workspace_.B->data(); + conv_workspace_.arguments.C = conv_workspace_.C->data(); + conv_workspace_.arguments.D = conv_workspace_.Computed->data(); + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + if (conv_workspace_.reordered_B != nullptr){ + conv_workspace_.arguments.reordered_B = 
conv_workspace_.reordered_B->data(); + }else{ + conv_workspace_.arguments.reordered_B = nullptr; + } + + conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + // update library::ConvArguments for parallel split-k reduction + conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); + conv_workspace_.arguments.alpha = problem_.alpha_one.data(); + conv_workspace_.arguments.beta = problem_.beta_zero.data(); + + /// initialize library::ReductionArguments + conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); + conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); + conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); + conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); + conv_workspace_.reduction_arguments.beta = problem_.beta.data(); + conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; + } + + // + // Run the CUTLASS operation + // + // initialize conv2d underlying operation to handle parallel reduction + library::Operation const* underlying_operation = operation; + + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { + results_.back().disposition = Disposition::kFailed; + return false; + } + } + +#if 0 + std::cout << "profiling : " << std::endl + << "conv2d : " << operation->description().name << std::endl + << "underlying conv2d : " << underlying_operation->description().name << std::endl + << "reduction : " << reduction_op_->description().name << std::endl; +#endif + + // run cutlass conv2d operation + results_.back().status = underlying_operation->run( + &conv_workspace_.arguments, + conv_workspace_.host_workspace.data(), + conv_workspace_.device_workspace.data()); + + if 
(results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // Run parallel reduction kernel for parallel split_k_mode + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + + results_.back().status = reduction_op_->run( + &conv_workspace_.reduction_arguments, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + } + + // Synchronize before running device reference + result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUDNN + // Run verification cudnn reference + if (options.verification.provider_enabled(library::Provider::kCUDNN)) { + + // Guard against unsupported cases + auto const & conv_desc = static_cast(operation->description()); + + Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); + + // Initialize reference data to the source data + conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + + if (status == Status::kSuccess) { + // call cudnn verification if supported + verify_with_cudnn_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else if (status == Status::kErrorInvalidProblem) { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; + } + + else { + // set verification map for cudnn to not supported + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUDNN + + // Run verification device reference 
+ if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) { + + // Restore reference data back to initial source data + conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + + verify_with_device_reference_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + // Run verification host reference + if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { + + // Restore reference data back to initial source data + conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + + verify_with_host_reference_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + + +/// Verifies CUTLASS against host reference +bool Conv2dOperationProfiler::verify_with_host_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + Status status; + + // + // Find host reference operation using conv2d functional description key + // + library::OperationDescription const &desc = operation->description(); + + auto &conv_desc = static_cast(desc); + + library::ConvFunctionalKey conv2d_key( + library::Provider::kReferenceHost, + conv_desc.conv_kind, + 
conv_desc.A.element, + conv_desc.A.layout, + conv_desc.B.element, + conv_desc.B.layout, + conv_desc.C.element, + conv_desc.C.layout, + conv_desc.tile_description.math_instruction.element_accumulator, + conv_desc.element_epilogue); + +#if 0 // debug print to check which host reference instance is selected + std::cout << conv2d_key << "\n"; +#endif + + auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); + + if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { + + results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; + return true; + } + + // conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm + library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); + auto cc_it = operators_it->second.find(preference_key); + + if(cc_it == operators_it->second.end()) { + results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; + return true; + } + + // host reference has only one instances in Conv2dOperationVectorMap + library::Operation const *reference_op = cc_it->second[0]; + + // + // Copy input tensors A, B, and C from device to host buffers + // + conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); + conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); + conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); + + conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); + conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); + conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); + + // + // Initialize structure containing Conv2d arguments + // + conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); + conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); + conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); + conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); 
+ + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Initialize host reference operation + // + std::vector host_workspace_reference_op; + + uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); + host_workspace_reference_op.resize(workspace_size, 0); + + reference_op->initialize( + &conv_workspace_.configuration, + host_workspace_reference_op.data()); + + // + // Run host reference operation + // + status = reference_op->run( + &conv_workspace_.arguments, + host_workspace_reference_op.data()); + + // Handle errors + if (status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; + return true; + } + + // + // Copy host reference output to device memory for equality check on device + // + conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); + + // + // Verify results + // + results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( + options, + *conv_workspace_.Computed, + *conv_workspace_.Reference, + conv_workspace_.Computed->batch_stride() + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + static_cast(operation->description()), + library::Provider::kCUTLASS, + library::Provider::kReferenceHost); + } + + // Return true means continue profiling + return true; +} + + +/// Verifies CUTLASS against host reference +bool Conv2dOperationProfiler::verify_with_device_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + 
ProblemSpace::Problem const &problem) { + + Status status; + + // + // Find device reference operation using conv2d functional description key + // + library::OperationDescription const &desc = operation->description(); + + auto &conv_desc = static_cast(desc); + + library::ConvFunctionalKey conv2d_key( + library::Provider::kReferenceDevice, + conv_desc.conv_kind, + conv_desc.A.element, + conv_desc.A.layout, + conv_desc.B.element, + conv_desc.B.layout, + conv_desc.C.element, + conv_desc.C.layout, + conv_desc.tile_description.math_instruction.element_accumulator, + conv_desc.element_epilogue); + + auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); + + if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { + + results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; + + return true; + } + + // conv2d device reference minimum cc is 50 and no iterator algorithm + library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone); + auto cc_it = operators_it->second.find(preference_key); + + if(cc_it == operators_it->second.end()) { + results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; + + return true; + } + + // device reference has only one instances in Conv2dOperationVectorMap + library::Operation const *reference_op = cc_it->second[0]; + + // + // Initialize device reference operation + // + std::vector host_workspace_reference_op; + + uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); + host_workspace_reference_op.resize(workspace_size, 0); + + reference_op->initialize( + &conv_workspace_.configuration, + host_workspace_reference_op.data()); + + // Initialize structure containing Conv2d arguments + conv_workspace_.arguments.A = conv_workspace_.A->data(); + conv_workspace_.arguments.B = conv_workspace_.B->data(); + conv_workspace_.arguments.C = 
conv_workspace_.C->data(); + conv_workspace_.arguments.D = conv_workspace_.Reference->data(); + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run device reference operation + // + status = reference_op->run( + &conv_workspace_.arguments, + host_workspace_reference_op.data()); + + + // Handle errors + if (status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified; + return true; + } + + // + // Verify results + // + results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors( + options, + *conv_workspace_.Computed, + *conv_workspace_.Reference, + conv_workspace_.Computed->batch_stride() + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + static_cast(operation->description()), + library::Provider::kCUTLASS, + library::Provider::kReferenceDevice); + } + + // Return true means continue profiling + return true; +} + +/// Measures performance results +bool Conv2dOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing Conv2d arguments + conv_workspace_.arguments.A = conv_workspace_.A->data(); + conv_workspace_.arguments.B = conv_workspace_.B->data(); + conv_workspace_.arguments.C = conv_workspace_.C->data(); + conv_workspace_.arguments.D = conv_workspace_.Computed->data(); + conv_workspace_.arguments.alpha = 
problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + // update library::ConvArguments for parallel split-k reduction + conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); + conv_workspace_.arguments.alpha = problem_.alpha_one.data(); + conv_workspace_.arguments.beta = problem_.beta_zero.data(); + + /// initialize library::ReductionArguments + conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); + conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); + conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); + conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); + conv_workspace_.reduction_arguments.beta = problem_.beta.data(); + conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; + } + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &conv_workspace_.arguments, + conv_workspace_.host_workspace.data(), + conv_workspace_.device_workspace.data() + ); + } + return true; + +} + +/// Method to profile a CUTLASS Operation +Status Conv2dOperationProfiler::profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void *host_workspace, + void *device_workspace) { + + GpuTimer timer; + + // initialize conv2d underlying operation to handle parallel reduction + library::Operation const* underlying_operation = operation; + + library::ConvArguments *conv_arguments = static_cast(arguments); + + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { + return Status::kErrorNotSupported; + } + } + + // + // Optional 
sleep to limit power consumption and thermals + // + + sleep(options.profiling.sleep_duration); + + // + // Warmup loop + // + + Status status; + + for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { + + // Setup rotating workspace + int workspace_idx = options.profiling.warmup_iterations + iteration; + int problem_idx = (workspace_idx % conv_workspace_.problem_count); + + conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); + conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); + conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); + conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + // update library::ConvArguments for parallel split-k reduction + conv_arguments->D = conv_workspace_.device_workspace.data(); + + /// initialize library::ReductionArguments + conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); + conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); + conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); + } + + // Run underlying conv2d operation + status = underlying_operation->run( + arguments, + host_workspace, + device_workspace); + + // Run parallel reduction kernel for parallel split_k_mode + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + + status = reduction_op_->run( + &conv_workspace_.reduction_arguments, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + } + + if (status != Status::kSuccess) { + return status; + } + } + + // + // Initialize GPU timer + // + + timer.start(); + + // + // Profiling loop + // + + int Iterations = options.profiling.iterations; + + int iteration = 0; + for (; iteration < Iterations; ++iteration) { + + // Setup rotating workspace + int problem_idx = (iteration % 
conv_workspace_.problem_count); + + conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); + conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); + conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); + conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + // update library::ConvArguments for parallel split-k reduction + conv_arguments->D = conv_workspace_.device_workspace.data(); + + /// initialize library::ReductionArguments + conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); + conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); + conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); + } + + // Run underlying conv2d operation + status = underlying_operation->run( + arguments, + host_workspace, + device_workspace); + + // Run parallel reduction kernel for parallel split_k_mode + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + + status = reduction_op_->run( + &conv_workspace_.reduction_arguments, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + } + + if (status != Status::kSuccess) { + return status; + } + } + + // + // Wait for completion + // + + timer.stop_and_wait(); + + // + // Update performance result + // + + runtime = timer.duration(iteration); + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#if CUTLASS_ENABLE_CUDNN + +/// Verifies CUTLASS against cudnn reference +bool Conv2dOperationProfiler::verify_with_cudnn_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + auto &conv_desc = static_cast(operation->description()); + + 
// + // Construct cudnn operators + // + + CudnnCreate handle; + cudnnStatus_t status = handle.get_cudnn_create_status(); + + if (status != CUDNN_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); + return true; + } + + // + // Initialize state + // + + // Initialize structure containing Conv2d arguments + conv_workspace_.arguments.A = conv_workspace_.A->data(); + conv_workspace_.arguments.B = conv_workspace_.B->data(); + conv_workspace_.arguments.D = conv_workspace_.Reference->data(); + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // cuDNN does not support four tensor arguments, so we copy the tensor C data into + // tensor D. + conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + conv_workspace_.arguments.C = conv_workspace_.arguments.D; + + try { + + // + // Construct dispatcher to cudnn operator + // + + detail::cudnnConvDispatcher conv_op( + conv_desc, + conv_workspace_.configuration, + conv_workspace_.arguments, + handle + ); + + if (conv_op.status != Status::kSuccess) { + if (conv_op.status == Status::kErrorNotSupported) { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; + + } else { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; + } + return true; + } + + + status = conv_op(handle); + + // Handle errors + if (status != CUDNN_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); + return true; + } + + // + // Verify results + // + + results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( + options, + *conv_workspace_.Computed, + *conv_workspace_.Reference, + conv_workspace_.Computed->batch_stride() + ); + + // Save workspace if incorrect + if 
(options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + conv_desc, + library::Provider::kCUTLASS, + library::Provider::kCUDNN); + } + } + catch (...) { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; + } + + // Return true means continue profiling + return true; +} + +#endif // #if CUTLASS_ENABLE_CUDNN + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/conv3d_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/conv3d_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..27e6e66c70912756281748717da7b719130ef7e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/conv3d_operation_profiler.cu @@ -0,0 +1,1353 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Convolution 3D profiling + +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/conv3d_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" +///////////////////////////////////////////////////////////////////////////////////////////////// +using namespace cutlass::library; + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +Conv3dOperationProfiler::Conv3dOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kConv3d, + { + {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, + {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"d", 
"input_d"}, "Input D dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"t", "filter_t"}, "Filter T dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"z", "output_z"}, "Output Z dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv3d problem space"}, + {ArgumentTypeID::kInteger, {"pad_d"}, "Padding in D direction"}, + {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"}, + {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, + {ArgumentTypeID::kInteger, {"stride_d"}, "Stride in D direction"}, + {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, + {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, + {ArgumentTypeID::kInteger, {"dilation_d"}, "Dilation in D direction"}, + {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, + {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, + {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, + {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, + {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, + {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, 
cross)"}, + {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, + }, + { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } + ) { + + description_ = " Conv3d operation. Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D)"; + +} + +/// Destructor +Conv3dOperationProfiler::~Conv3dOperationProfiler() { + +} + + +/// Prints usage statement for the math function +void Conv3dOperationProfiler::print_usage(std::ostream &out) const { + out << "Conv3d" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void Conv3dOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular convolution (specify all the convolution parameters):\n" + << " $ cutlass_profiler --operation=Conv3d" + " --Activation=f16:ndhwc --Filter=f16:ndhwc --Output=f16 --accumulator-type=f32" + " --n=32 --d=16 --h=14 --w=14 --c=8 --k=64 --t=3 --r=3 --s=3" + " --pad_d=1 --pad_h=1 --pad_w=1" + " --stride_d=1 --stride::h=1 --stride::w=1" + " --dilation_d=1 --dilation::h=1 --dilation::w=1\n\n"; +} + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << 
std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +/// Total number of bytes loaded +int64_t Conv3dOperationProfiler::Conv3dProblem::bytes(library::ConvDescription const &operation_desc) const { + cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); + + // Input bytes read and Output bytes written for the gemm problem + int64_t bytes_ = + int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); + } + + return bytes_; +} + +/// Total number of flops computed +int64_t Conv3dOperationProfiler::Conv3dProblem::flops( + library::ConvDescription const &operation_desc) const { + + cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); + + int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; + int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; + + // Adjust mainloop flop for dgrad strided + if (operation_desc.conv_kind == library::ConvKind::kDgrad) { + flops_mainloop_ = flops_mainloop_ / ( stride_d * stride_h * stride_w); + } + + return (flops_mainloop_ + flops_epilogue_); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status Conv3dOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + 
library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::ConvDescription const &operation_desc = + static_cast(operation->description()); + + if (!arg_as_int(problem_.n, "n", problem_space, problem)) { + // default value + problem_.n = 1; + } + + if (!arg_as_int(problem_.d, "d", problem_space, problem)) { + // default value + problem_.d = 8; + } + + if (!arg_as_int(problem_.h, "h", problem_space, problem)) { + // default value + problem_.h = 14; + } + + if (!arg_as_int(problem_.w, "w", problem_space, problem)) { + // default value + problem_.w = 14; + } + + if (!arg_as_int(problem_.c, "c", problem_space, problem)) { + // default value + problem_.c = 32; + } + + if (!arg_as_int(problem_.k, "k", problem_space, problem)) { + // default value + problem_.k = 32; + } + + if (!arg_as_int(problem_.t, "t", problem_space, problem)) { + // default value + problem_.t = 3; + } + + if (!arg_as_int(problem_.r, "r", problem_space, problem)) { + // default value + problem_.r = 3; + } + + if (!arg_as_int(problem_.s, "s", problem_space, problem)) { + // default value + problem_.s = 3; + } + + if (!arg_as_int(problem_.pad_d, "pad_d", problem_space, problem)) { + // default value + problem_.pad_d = 1; + } + + if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { + // default value + problem_.pad_w = 1; + } + if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { + // default value + problem_.pad_h = 1; + } + + if (!arg_as_int(problem_.stride_d, "stride_d", problem_space, problem)) { + // default value + problem_.stride_d = 1; + } + + if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { + // default value + problem_.stride_h = 1; + } + + if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { + // default value + problem_.stride_w = 1; + } + + if (!arg_as_int(problem_.dilation_d, "dilation_d", problem_space, problem)) { + // default value + 
problem_.dilation_d = 1; + } + + if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { + // default value + problem_.dilation_h = 1; + } + + if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { + // default value + problem_.dilation_w = 1; + } + + //////////////////////// Convolution output dimensions p and q //////////////////////// + // Cutlass convolutions support arbitrary output sizes and not constrained by // + // input, filter, padding, striding, dilation sizes. // + // cuDNN sets the output dimensions (p, q) using following equations: // + // // + // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // + // where; div_up(a, b) : (a - 1)/b + 1 // + // // + // Thus, when output p and q dimensions are unspecified by the user // + // cutlass profiler sets p and q which are cuDNN compliant. // + // // + //////////////////////////////////////////////////////////////////////////////////////// + // set convolution output z + if (!arg_as_int(problem_.z, "z", problem_space, problem)) { + // default value (set using cudnn formula for output height, when p is not provided) + problem_.z = ( + problem_.d + + 2 * problem_.pad_d - + ((problem_.t - 1) * problem_.dilation_d + 1) + ) / (problem_.stride_d) + + 1; + } + + // set convolution output p + if (!arg_as_int(problem_.p, "p", problem_space, problem)) { + // default value (set using cudnn formula for output height, when p is not provided) + problem_.p = ( + problem_.h + + 2 * problem_.pad_h - + ((problem_.r - 1) * problem_.dilation_h + 1) + ) / (problem_.stride_h) + + 1; + } + + // set convolution output q + if (!arg_as_int(problem_.q, "q", problem_space, problem)) { + // default value (set using cudnn formula for output width, when q is not provided) + problem_.q = ( + problem_.w + + 2 * problem_.pad_w - + ((problem_.s - 1) * problem_.dilation_w + 1) + ) / (problem_.stride_w) + + 1; + } + 
///////////////////////////////////////////////////////////////////////////////////////// + + + if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { + // default value + problem_.split_k_mode = library::SplitKMode::kSerial; + } + + if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + problem_.split_k_slices = 1; + } + + if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { + // default value + problem_.conv_mode = library::ConvModeID::kCrossCorrelation; + } + + if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { + // default value + problem_.eq_gemm_provider = library::Provider::kNone; + } + + if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + problem_.alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + problem_.beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + // initialize 
library::ConvConfiguration + conv_workspace_.configuration.problem_size = conv::Conv3dProblemSize( + int(problem_.n), + int(problem_.d), + int(problem_.h), + int(problem_.w), + int(problem_.c), + int(problem_.k), + int(problem_.t), + int(problem_.r), + int(problem_.s), + int(problem_.z), + int(problem_.p), + int(problem_.q), + int(problem_.pad_d), + int(problem_.pad_h), + int(problem_.pad_w), + int(problem_.stride_d), + int(problem_.stride_h), + int(problem_.stride_w), + int(problem_.dilation_d), + int(problem_.dilation_h), + int(problem_.dilation_w), + static_cast(static_cast(problem_.conv_mode)), + int(problem_.split_k_slices), + 1 // groups + ); + + conv_workspace_.configuration.split_k_mode = static_cast(static_cast(problem_.split_k_mode)); + + conv_workspace_.configuration.layout_activations.stride() = make_Coord( + int(problem_.c), + int(problem_.w) * int(problem_.c), + int(problem_.h) * int(problem_.w) * int(problem_.c), + int(problem_.d) * int(problem_.h) * int(problem_.w) * int(problem_.c) + ); + + conv_workspace_.configuration.layout_filters.stride() = make_Coord( + int(problem_.c), + int(problem_.s) * int(problem_.c), + int(problem_.r) * int(problem_.s) * int(problem_.c), + int(problem_.t) * int(problem_.r) * int(problem_.s) * int(problem_.c) + ); + + conv_workspace_.configuration.layout_output.stride() = make_Coord( + int(problem_.k), + int(problem_.q) * int(problem_.k), + int(problem_.q) * int(problem_.p) * int(problem_.k), + int(problem_.z) * int(problem_.q) * int(problem_.p) * int(problem_.k) + ); + + + // initialize library::ConvArguments + conv_workspace_.arguments.A = nullptr; + conv_workspace_.arguments.B = nullptr; + conv_workspace_.arguments.C = nullptr; + conv_workspace_.arguments.D = nullptr; + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // initialize reduction operation for parallel 
splitKMode not supported for conv3d + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { + return Status::kErrorInternal; + } + } + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); +} + +/// Initializes the performance result +void Conv3dOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::ConvDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "Activation", problem_space, + std::string(library::to_string(operation_desc.activation().element)) + + ":" + library::to_string(operation_desc.activation().layout)); + + set_argument(result, "Filter", problem_space, + std::string(library::to_string(operation_desc.filter().element)) + + ":" + library::to_string(operation_desc.filter().layout)); + + set_argument(result, "Output", problem_space, + std::string(library::to_string(operation_desc.output().element)) + + ":" + library::to_string(operation_desc.output().layout)); + + set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); + + set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm))); + + set_argument(result, "n", problem_space, problem_.n); + set_argument(result, "d", problem_space, problem_.d); + set_argument(result, "h", problem_space, problem_.h); + set_argument(result, "w", problem_space, problem_.w); + set_argument(result, "c", problem_space, problem_.c); + + 
set_argument(result, "k", problem_space, problem_.k); + set_argument(result, "t", problem_space, problem_.t); + set_argument(result, "r", problem_space, problem_.r); + set_argument(result, "s", problem_space, problem_.s); + + set_argument(result, "z", problem_space, problem_.z); + set_argument(result, "p", problem_space, problem_.p); + set_argument(result, "q", problem_space, problem_.q); + + set_argument(result, "pad_d", problem_space, problem_.pad_d); + set_argument(result, "pad_h", problem_space, problem_.pad_h); + set_argument(result, "pad_w", problem_space, problem_.pad_w); + + set_argument(result, "stride_d", problem_space, problem_.stride_d); + set_argument(result, "stride_h", problem_space, problem_.stride_h); + set_argument(result, "stride_w", problem_space, problem_.stride_w); + + set_argument(result, "dilation_d", problem_space, problem_.dilation_d); + set_argument(result, "dilation_h", problem_space, problem_.dilation_h); + set_argument(result, "dilation_w", problem_space, problem_.dilation_w); + + set_argument(result, "split_k_mode", problem_space, + std::string(library::to_string(problem_.split_k_mode))); + set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); + + set_argument(result, "conv_mode", problem_space, + std::string(library::to_string(problem_.conv_mode))); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); + + set_argument(result, "eq_gemm_provider", problem_space, + std::string(library::to_string(problem_.eq_gemm_provider))); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + // Bytes of activation, filter, and output tensors + result.bytes = problem_.bytes(operation_desc); + + // Theoretical flops required for the computation + result.flops = problem_.flops(operation_desc); + + // 
Measured runtime + result.runtime = 0; + +} + +/// Initialize reduction problem dimensions and library::Operation +bool Conv3dOperationProfiler::initialize_reduction_configuration_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::ConvDescription const &conv_desc = + static_cast(operation->description()); + + library::ConvKind const &conv_kind = conv_desc.conv_kind; + + if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { + return false; + } + + if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { + return false; + } + + /// This chooses the appropriate stride element of the row-major C tensor. + int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 3 : 0); + + /// initialize library::ReductionConfiguration + conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); + conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); + conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); + conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx]; + conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx]; + conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx]; + + // find reduction operation + library::ReductionFunctionalKey reduction_key( + library::Provider::kCUTLASS, + conv_desc.tile_description.math_instruction.element_accumulator, // element workspace + conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator + conv_desc.C.element, // element output + conv_desc.element_epilogue // 
element compute + ); + +#if 0// debug print to check which reduction instance is selected + std::cout << reduction_key << "\n"; +#endif + auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); + + if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { + + return false; + } + + // initialize reduction operation required for parallel split-k conv2d operator + reduction_op_ = reduction_it->second; + + // reduction operation found and initialized + return true; +} + + +/// Initializes workspace +Status Conv3dOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + // initialize conv2d underlying operation to handle parallel reduction + library::Operation const* underlying_operation = operation; + + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { + return Status::kErrorNotSupported; + } + } + + library::ConvDescription const &operation_desc = + static_cast(underlying_operation->description()); + + // Compute the number of copies of the problem to avoid L2 camping. 
+ if (!options.profiling.workspace_count) { + int64_t bytes = problem_.bytes(operation_desc); + if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { + conv_workspace_.problem_count = + 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); + } + else { + conv_workspace_.problem_count = 1; + } + } + else { + conv_workspace_.problem_count = options.profiling.workspace_count; + } + + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 0; + conv_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + problem_.extent_a(operation_desc.conv_kind), + conv_workspace_.stride_a(operation_desc.conv_kind), + conv_workspace_.problem_count, + seed_shift++ + ); + + conv_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + problem_.extent_b(operation_desc.conv_kind), + conv_workspace_.stride_b(operation_desc.conv_kind), + conv_workspace_.problem_count, + seed_shift++ + ); + + conv_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + problem_.extent_c(operation_desc.conv_kind), + conv_workspace_.stride_c(operation_desc.conv_kind), + conv_workspace_.problem_count, + seed_shift++ + ); + + conv_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + problem_.extent_c(operation_desc.conv_kind), + conv_workspace_.stride_c(operation_desc.conv_kind), + conv_workspace_.problem_count + ); + + conv_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + problem_.extent_c(operation_desc.conv_kind), + conv_workspace_.stride_c(operation_desc.conv_kind), + conv_workspace_.problem_count + ); + + } + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if 
(options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); + conv_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); + conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = underlying_operation->initialize( + &conv_workspace_.configuration, + conv_workspace_.host_workspace.data(), + conv_workspace_.device_workspace.data()); + + if (status != Status::kSuccess) { + return status; + } + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); + conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); + + status = reduction_op_->initialize( + &conv_workspace_.reduction_configuration, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + + if (status != Status::kSuccess) { + return status; + } + } + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kConv3d; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool Conv3dOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const 
&problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + cudaError_t result; + + // Initialize structure containing Conv arguments + set_cutlass_operator_arguments_(); + + conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); + + // + // Run the CUTLASS operation + // + // initialize conv2d underlying operation to handle parallel reduction + library::Operation const* underlying_operation = operation; + + if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { + results_.back().disposition = Disposition::kFailed; + return false; + } + } + +#if 0 + std::cout << "profiling : " << std::endl + << "conv2d : " << operation->description().name << std::endl + << "underlying conv2d : " << underlying_operation->description().name << std::endl + << "reduction : " << reduction_op_->description().name << std::endl; +#endif + + // run cutlass conv2d operation + results_.back().status = underlying_operation->run( + &conv_workspace_.arguments, + conv_workspace_.host_workspace.data(), + conv_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // Run parallel reduction kernel for parallel split_k_mode + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + + results_.back().status = reduction_op_->run( + &conv_workspace_.reduction_arguments, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + } + + // Synchronize before running device reference + result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + 
results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUDNN + // Run verification cudnn reference + if (options.verification.provider_enabled(library::Provider::kCUDNN)) { + + // Guard against unsupported cases + auto const & conv_desc = static_cast(operation->description()); + + Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); + + // Initialize reference data to the source data + conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + + if (status == Status::kSuccess) { + // call cudnn verification if supported + verify_with_cudnn_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else if (status == Status::kErrorInvalidProblem) { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; + } + + else { + // set verification map for cudnn to not supported + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUDNN + + // Run verification host reference + if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { + + // Restore reference data back to initial source data + conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + + verify_with_host_reference_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + 
return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + + +/// Verifies CUTLASS against host reference +bool Conv3dOperationProfiler::verify_with_host_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + Status status; + + // + // Find host reference operation using conv functional description key + // + library::OperationDescription const &desc = operation->description(); + + auto &conv_desc = static_cast(desc); + + library::ConvFunctionalKey conv_key( + library::Provider::kReferenceHost, + conv_desc.conv_kind, + conv_desc.A.element, + conv_desc.A.layout, + conv_desc.B.element, + conv_desc.B.layout, + conv_desc.C.element, + conv_desc.C.layout, + conv_desc.tile_description.math_instruction.element_accumulator, + conv_desc.element_epilogue); + +#if 0 // debug print to check which host reference instance is selected + std::cout << conv_key << "\n"; +#endif + + auto operators_it = Singleton::get().operation_table.conv3d_operations.find(conv_key); + + if(operators_it == Singleton::get().operation_table.conv3d_operations.end()) { + + results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; + return true; + } + + // conv3d host reference minimum cc is 0 (CPU) and no iterator algorithm + library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); + auto cc_it = operators_it->second.find(preference_key); + + if(cc_it == operators_it->second.end()) { + results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; + return true; + } + + // host reference has only one 
instances in ConvOperationVectorMap + library::Operation const *reference_op = cc_it->second[0]; + + // + // Copy input tensors A, B, and C from device to host buffers + // + conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); + conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); + conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); + conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); + conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); + conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); + + // + // Initialize structure containing Conv3d arguments + // + conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); + conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); + conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); + conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Initialize host reference operation + // + std::vector host_workspace_reference_op; + + uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); + host_workspace_reference_op.resize(workspace_size, 0); + + reference_op->initialize( + &conv_workspace_.configuration, + host_workspace_reference_op.data()); + + // + // Run host reference operation + // + status = reference_op->run( + &conv_workspace_.arguments, + host_workspace_reference_op.data()); + + // Handle errors + if (status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; + return true; + } + + // + // Copy host reference output to device memory for equality check on device + // + conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); + + // + // Verify 
results + // + results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( + options, + *conv_workspace_.Computed, + *conv_workspace_.Reference, + conv_workspace_.Computed->batch_stride() + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + static_cast(operation->description()), + library::Provider::kCUTLASS, + library::Provider::kReferenceHost); + } + + // Return true means continue profiling + return true; +} + + +/// Verifies CUTLASS against host reference +bool Conv3dOperationProfiler::verify_with_device_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + // TODO: verify cutlass conv3d against device reference + + // Return true means continue profiling + return true; +} + +/// Measures performance results +bool Conv3dOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + set_cutlass_operator_arguments_(); + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &conv_workspace_.arguments, + conv_workspace_.host_workspace.data(), + conv_workspace_.device_workspace.data() + ); + } + return true; + +} + +/// Updates the arguments structure for the CUTLASS operator based on +/// the problem index. 
+void Conv3dOperationProfiler::set_cutlass_operator_arguments_(int problem_idx) { + // Initialize structure containing Conv3d arguments + conv_workspace_.arguments.A = conv_workspace_.A->batch_data(problem_idx); + conv_workspace_.arguments.B = conv_workspace_.B->batch_data(problem_idx); + conv_workspace_.arguments.C = conv_workspace_.C->batch_data(problem_idx); + conv_workspace_.arguments.D = conv_workspace_.Computed->batch_data(problem_idx); + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + // update library::ConvArguments for parallel split-k reduction + conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); + conv_workspace_.arguments.alpha = problem_.alpha_one.data(); + conv_workspace_.arguments.beta = problem_.beta_zero.data(); + + /// initialize library::ReductionArguments + conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); + conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); + conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); + conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); + conv_workspace_.reduction_arguments.beta = problem_.beta.data(); + conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; + } +} + +/// Method to profile a CUTLASS Operation +Status Conv3dOperationProfiler::profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void *host_workspace, + void *device_workspace) { + + GpuTimer timer; + + // initialize conv2d underlying operation to handle parallel reduction + library::Operation const* underlying_operation = operation; + + 
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { + return Status::kErrorNotSupported; + } + } + + // + // Optional sleep to limit power consumption and thermals + // + + sleep(options.profiling.sleep_duration); + + // + // Warmup loop + // + + Status status; + + for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { + + // Setup rotating workspace + int workspace_idx = options.profiling.warmup_iterations + iteration; + int problem_idx = (workspace_idx % conv_workspace_.problem_count); + + set_cutlass_operator_arguments_(problem_idx); + + // Run underlying conv2d operation + status = underlying_operation->run( + arguments, + host_workspace, + device_workspace); + + // Run parallel reduction kernel for parallel split_k_mode + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + + status = reduction_op_->run( + &conv_workspace_.reduction_arguments, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + } + + if (status != Status::kSuccess) { + return status; + } + } + + // + // Initialize GPU timer + // + + timer.start(); + + // + // Profiling loop + // + + int Iterations = options.profiling.iterations; + + int iteration = 0; + for (; iteration < Iterations; ++iteration) { + + // Setup rotating workspace + int problem_idx = (iteration % conv_workspace_.problem_count); + + set_cutlass_operator_arguments_(problem_idx); + + // Run underlying conv2d operation + status = underlying_operation->run( + arguments, + host_workspace, + device_workspace); + + // Run parallel reduction kernel for parallel split_k_mode + if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { + status = reduction_op_->run( + &conv_workspace_.reduction_arguments, + conv_workspace_.reduction_host_workspace.data(), + nullptr); + } + + if (status != Status::kSuccess) { + return 
status; + } + } + + // + // Wait for completion + // + + timer.stop_and_wait(); + + // + // Update performance result + // + + runtime = timer.duration(iteration); + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +#if CUTLASS_ENABLE_CUDNN + +/// Verifies CUTLASS against cudnn reference +bool Conv3dOperationProfiler::verify_with_cudnn_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + auto &conv_desc = static_cast(operation->description()); + + // + // Construct cudnn operators + // + + CudnnCreate handle; + cudnnStatus_t status = handle.get_cudnn_create_status(); + + if (status != CUDNN_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); + return true; + } + + // + // Initialize state + // + + // Initialize structure containing Conv2d arguments + conv_workspace_.arguments.A = conv_workspace_.A->data(); + conv_workspace_.arguments.B = conv_workspace_.B->data(); + conv_workspace_.arguments.D = conv_workspace_.Reference->data(); + conv_workspace_.arguments.alpha = problem_.alpha.data(); + conv_workspace_.arguments.beta = problem_.beta.data(); + conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // cuDNN does not support four tensor arguments, so we copy the tensor C data into + // tensor D. 
+ conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); + conv_workspace_.arguments.C = conv_workspace_.arguments.D; + + try { + + // + // Construct dispatcher to cudnn operator + // + + detail::cudnnConvDispatcher conv_op( + conv_desc, + conv_workspace_.configuration, + conv_workspace_.arguments, + handle + ); + + if (conv_op.status != Status::kSuccess) { + if (conv_op.status == Status::kErrorNotSupported) { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; + + } else { + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; + } + return true; + } + + + status = conv_op(handle); + + // Handle errors + if (status != CUDNN_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); + return true; + } + + // + // Verify results + // + + results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( + options, + *conv_workspace_.Computed, + *conv_workspace_.Reference + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + conv_desc, + library::Provider::kCUTLASS, + library::Provider::kCUDNN); + } + } + catch (...) 
{ + results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; + } + + // Return true means continue profiling + return true; + +} + +#endif // #if CUTLASS_ENABLE_CUDNN + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cublas_helpers.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cublas_helpers.cu new file mode 100644 index 0000000000000000000000000000000000000000..cf2dea90e9587530ac42154100ad7e83aa3c7638 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cublas_helpers.cu @@ -0,0 +1,1181 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Helper functions for mapping CUTLASS concepts to cuBLAS. +*/ + +#include + +#if CUTLASS_ENABLE_CUBLAS +#include "cutlass/profiler/cublas_helpers.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Converts a cuBLAS status to cutlass::Status +Status get_cutlass_status(cublasStatus_t cublas) { + + switch (cublas) { + case CUBLAS_STATUS_SUCCESS: + return Status::kSuccess; + case CUBLAS_STATUS_INVALID_VALUE: + return Status::kErrorInvalidProblem; + case CUBLAS_STATUS_NOT_SUPPORTED: + return Status::kErrorNotSupported; + default: break; + } + return Status::kErrorInternal; +} + +/// Converts a cuBLAS status to cutlass::profiler::Disposition +Disposition get_cutlass_disposition(cublasStatus_t cublas_status) { + + if (cublas_status == CUBLAS_STATUS_INVALID_VALUE) { + return Disposition::kInvalidProblem; + } + else if (cublas_status == CUBLAS_STATUS_NOT_SUPPORTED) { + return Disposition::kNotSupported; + } + return Disposition::kFailed; +} + +/// Maps a CUTLASS 
tensor layout to a cuBLAS transpose operation +bool get_cublas_transpose_operation( + cublasOperation_t &operation, + library::LayoutTypeID layout, + library::ComplexTransform transform) { + + switch (layout) { + case library::LayoutTypeID::kColumnMajor: + if (transform == library::ComplexTransform::kNone) { + operation = CUBLAS_OP_N; + return true; + } + else { + return false; + } + break; + case library::LayoutTypeID::kRowMajor: + if (transform == library::ComplexTransform::kNone) { + operation = CUBLAS_OP_T; + return true; + } + else if (transform == library::ComplexTransform::kConjugate) { + operation = CUBLAS_OP_C; + return true; + } + break; + default: break; + } + + return false; +} + +/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration +bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type) { + switch (element_type) { + case library::NumericTypeID::kFE4M3: +#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) + data_type = CUDA_R_8F_E4M3; + return true; +#endif + break; + + case library::NumericTypeID::kFE5M2: +#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) + data_type = CUDA_R_8F_E5M2; + return true; +#endif + break; + + case library::NumericTypeID::kF16: + data_type = CUDA_R_16F; + return true; + + case library::NumericTypeID::kBF16: + data_type = CUDA_R_16BF; + return true; + + case library::NumericTypeID::kTF32: + break; + + case library::NumericTypeID::kF32: + data_type = CUDA_R_32F; + return true; + + case library::NumericTypeID::kF64: + data_type = CUDA_R_64F; + return true; + + case library::NumericTypeID::kS4: + break; + + case library::NumericTypeID::kS8: + data_type = CUDA_R_8I; + return true; + + case library::NumericTypeID::kS16: + break; + + case library::NumericTypeID::kS32: + data_type = CUDA_R_32I; + return true; + + case library::NumericTypeID::kS64: + break; + + case library::NumericTypeID::kU4: 
+ break; + + case library::NumericTypeID::kU8: + data_type = CUDA_R_8U; + return true; + + case library::NumericTypeID::kU16: + break; + + case library::NumericTypeID::kU32: + data_type = CUDA_R_32U; + return true; + + case library::NumericTypeID::kU64: + break; + + case library::NumericTypeID::kB1: + break; + + case library::NumericTypeID::kCF32: + data_type = CUDA_C_32F; + return true; + + case library::NumericTypeID::kCF64: + data_type = CUDA_C_64F; + return true; + + case library::NumericTypeID::kInvalid: + + default: + break; + } + + return false; +} + +/// Maps a cutlass::SideMode to cuBLAS side mode +bool get_cublas_side_mode(cublasSideMode_t& side, SideMode side_mode) { + + switch (side_mode) { + case SideMode::kLeft: + side = CUBLAS_SIDE_LEFT; + return true; + case SideMode::kRight: + side = CUBLAS_SIDE_RIGHT; + return true; + default: break; + } + + return false; +} + +/// Maps a cutlass::FillMode to cuBLAS fill mode +bool get_cublas_fill_mode(cublasFillMode_t& uplo, FillMode fill_mode) { + + switch (fill_mode) { + case FillMode::kLower: + uplo = CUBLAS_FILL_MODE_LOWER; + return true; + case FillMode::kUpper: + uplo = CUBLAS_FILL_MODE_UPPER; + return true; + default: break; + } + + return false; +} + +/// Maps a cutlass::DiagType to cuBLAS diag type +bool get_cublas_diag_type(cublasDiagType_t& diag, DiagType diag_type) { + + switch (diag_type) { + case DiagType::kNonUnit: + diag = CUBLAS_DIAG_NON_UNIT; + return true; + case DiagType::kUnit: + diag = CUBLAS_DIAG_UNIT; + return true; + default: break; + } + + return false; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class +cublasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) { + return (opcode_class == library::OpcodeClassID::kSimt ? 
+ CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns a status if cuBLAS can satisfy a particular GEMM description +Status cublas_satisfies(library::GemmDescription const &desc) { + auto const &math_instruction = desc.tile_description.math_instruction; + + if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && + math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { + + return Status::kErrorNotSupported; + } + + // output type S4 and S8 not supported in cuBLAS + if (desc.C.element == library::NumericTypeID::kS4 || + desc.C.element == library::NumericTypeID::kS8) { + + return Status::kErrorNotSupported; + } + + // input type BF16 and TF32 not supported in cuBLAS + if (desc.A.element == library::NumericTypeID::kBF16 || + desc.A.element == library::NumericTypeID::kTF32) { + + return Status::kErrorNotSupported; + } + + return Status::kSuccess; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +cublasGemmExDispatcher::cublasGemmExDispatcher( + library::GemmDescription const &op_desc, + library::GemmUniversalConfiguration configuration_, + library::GemmUniversalArguments arguments_, + cublasGemmAlgo_t algorithm +): + configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) { + + bool good = true; + + good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); + good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B)); + good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); + good = (good && get_cublas_datatype(data_type_B, op_desc.B.element)); + good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); + + good = (good && get_cublas_datatype( + compute_data_type, + 
op_desc.tile_description.math_instruction.element_accumulator)); + + // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe + // internal numerical data types used in the computation. +#if (__CUDACC_VER_MAJOR__ >= 11) + library::OpcodeClassID const & opcode_class = + op_desc.tile_description.math_instruction.opcode_class; + + if (good && + op_desc.A.element == library::NumericTypeID::kF32 && + op_desc.B.element == library::NumericTypeID::kF32 && + opcode_class == library::OpcodeClassID::kTensorOp) { + + compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; + } + else if (good) { + bool const isPedantic = false; + switch (compute_data_type) { + case CUDA_R_32F: + case CUDA_C_32F: + compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; + break; + case CUDA_R_64F: + case CUDA_C_64F: + compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; + break; + case CUDA_R_16F: + compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; + break; + case CUDA_R_32I: + compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; + break; + default: + good = false; + break; + } + } +#endif // __CUDACC_VER_MAJOR__ >= 11 + + if (!good) { + status = Status::kErrorNotSupported; + } +} + +/// Executes GEMM using these arguments +cublasStatus_t cublasGemmExDispatcher::operator()(cublasHandle_t handle) { + + if (configuration.mode == library::GemmUniversalMode::kBatched) { + return cublasGemmStridedBatchedEx( + handle, + trans_A, + trans_B, + configuration.problem_size.m(), + configuration.problem_size.n(), + configuration.problem_size.k(), + arguments.alpha, + arguments.A, + data_type_A, + int(configuration.lda), + arguments.batch_stride_A, + arguments.B, + data_type_B, + int(configuration.ldb), + arguments.batch_stride_B, + arguments.beta, + arguments.D, + data_type_C, + int(configuration.ldc), + arguments.batch_stride_C, + configuration.batch_count, + #if (__CUDACC_VER_MAJOR__ >= 11) + compute_type, + #else + compute_data_type, + #endif + algo + ); + } + else { + return cublasGemmEx( + handle, + trans_A, + trans_B, + configuration.problem_size.m(), + configuration.problem_size.n(), + configuration.problem_size.k(), + arguments.alpha, + arguments.A, + data_type_A, + int(configuration.lda), + arguments.B, + data_type_B, + int(configuration.ldb), + arguments.beta, + arguments.D, + data_type_C, + int(configuration.ldc), + #if (__CUDACC_VER_MAJOR__ >= 11) + compute_type, + #else + compute_data_type, + #endif + algo + ); + } +} + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns a status if cuBLAS can satisfy a particular RankK description +Status cublas_satisfies(library::RankKDescription const &desc) { + auto const &math_instruction = desc.tile_description.math_instruction; + + if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && + math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { + + return Status::kErrorNotSupported; 
+ } + + // output type S4 and S8 not supported in cuBLAS + if (desc.C.element == library::NumericTypeID::kS4 || + desc.C.element == library::NumericTypeID::kS8) { + + return Status::kErrorNotSupported; + } + + // input type BF16 and TF32 not supported in cuBLAS + if (desc.A.element == library::NumericTypeID::kBF16 || + desc.A.element == library::NumericTypeID::kTF32) { + + return Status::kErrorNotSupported; + } + + return Status::kSuccess; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +cublasRankKDispatcher::cublasRankKDispatcher( + library::RankKDescription const &op_desc, + library::RankKConfiguration configuration_, + library::RankKArguments arguments_ +): + configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { + + blas_mode = op_desc.blas_mode; + num_ranks = op_desc.num_ranks; + + bool good = true; + + good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); + good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); + good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); + good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); + + good = (good && get_cublas_datatype( + compute_data_type, + op_desc.tile_description.math_instruction.element_accumulator)); + + // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe + // internal numerical data types used in the computation. +#if (__CUDACC_VER_MAJOR__ >= 11) + library::OpcodeClassID const & opcode_class = + op_desc.tile_description.math_instruction.opcode_class; + + if (good && + op_desc.A.element == library::NumericTypeID::kF32 && + opcode_class == library::OpcodeClassID::kTensorOp) { + + compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; + } + else if (good) { + bool const isPedantic = false; + switch (compute_data_type) { + case CUDA_R_32F: + case CUDA_C_32F: + compute_type = isPedantic ? 
CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; + break; + case CUDA_R_64F: + case CUDA_C_64F: + compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; + break; + case CUDA_R_16F: + compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; + break; + case CUDA_R_32I: + compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; + break; + default: + good = false; + break; + } + } +#endif // __CUDACC_VER_MAJOR__ >= 11 + + if (!good) { + status = Status::kErrorNotSupported; + } +} + +/// Executes RankK using these arguments +cublasStatus_t cublasRankKDispatcher::operator()(cublasHandle_t handle) { + + // SYRK and HERK + if (num_ranks == 1) { + if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) { + return cublasDsyrk( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) { + + #if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; + #endif + + return cublasSsyrk( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) { + + if (blas_mode == BlasMode::kHermitian) { + return cublasZherk( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.beta), + static_cast(arguments.D), + 
int(configuration.ldc) + ); + } + else { + return cublasZsyrk( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + + } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) { + + #if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; + #endif + + if (blas_mode == BlasMode::kHermitian) { + return cublasCherk( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + else { + return cublasCsyrk( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + } else { + return CUBLAS_STATUS_NOT_SUPPORTED; + } + } + + // SYR2K and HER2K + else if (num_ranks == 2) { + if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) { + return cublasDsyr2k( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) { + + #if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; + #endif + 
+ return cublasSsyr2k( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) { + + if (blas_mode == BlasMode::kHermitian) { + return cublasZher2k( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + else { + return cublasZsyr2k( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + + } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) { + + #if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; + #endif + + if (blas_mode == BlasMode::kHermitian) { + return cublasCher2k( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + else { + return cublasCsyr2k( + handle, + uplo, + trans_A, + configuration.problem_size.n(), + configuration.problem_size.k(), + static_cast(arguments.alpha), + 
static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + } else { + return CUBLAS_STATUS_NOT_SUPPORTED; + } + } + else { + return CUBLAS_STATUS_NOT_SUPPORTED; + } +} + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns a status if cuBLAS can satisfy a particular TRMM description +Status cublas_satisfies(library::TrmmDescription const &desc) { + auto const &math_instruction = desc.tile_description.math_instruction; + + if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && + math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { + + return Status::kErrorNotSupported; + } + + // output type S4 and S8 not supported in cuBLAS + if (desc.D.element == library::NumericTypeID::kS4 || + desc.D.element == library::NumericTypeID::kS8) { + + return Status::kErrorNotSupported; + } + + // input type BF16 and TF32 not supported in cuBLAS + if (desc.A.element == library::NumericTypeID::kBF16 || + desc.A.element == library::NumericTypeID::kTF32) { + + return Status::kErrorNotSupported; + } + + return Status::kSuccess; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +cublasTrmmDispatcher::cublasTrmmDispatcher( + library::TrmmDescription const &op_desc, + library::TrmmConfiguration configuration_, + library::TrmmArguments arguments_ +): + configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { + + bool good = true; + + good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); + good = (good && get_cublas_side_mode(side, op_desc.side_mode)); + good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); + good = (good && get_cublas_diag_type(diag, op_desc.diag_type)); + good = 
(good && get_cublas_datatype(data_type_A, op_desc.A.element)); + good = (good && get_cublas_datatype(data_type_B, op_desc.B.element)); + good = (good && get_cublas_datatype(data_type_D, op_desc.D.element)); + + // if A is Transposed, then for cuBLAS that is inverted Fill Mode. + if (trans_A == CUBLAS_OP_T || trans_A == CUBLAS_OP_C) { + if (uplo == CUBLAS_FILL_MODE_LOWER) + uplo = CUBLAS_FILL_MODE_UPPER; + else + uplo = CUBLAS_FILL_MODE_LOWER; + } + + good = (good && get_cublas_datatype( + compute_data_type, + op_desc.tile_description.math_instruction.element_accumulator)); + + // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe + // internal numerical data types used in the computation. +#if (__CUDACC_VER_MAJOR__ >= 11) + library::OpcodeClassID const & opcode_class = + op_desc.tile_description.math_instruction.opcode_class; + + if (good && + op_desc.A.element == library::NumericTypeID::kF32 && + opcode_class == library::OpcodeClassID::kTensorOp) { + + compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; + } + else if (good) { + bool const isPedantic = false; + switch (compute_data_type) { + case CUDA_R_32F: + case CUDA_C_32F: + compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; + break; + case CUDA_R_64F: + case CUDA_C_64F: + compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; + break; + case CUDA_R_16F: + compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; + break; + case CUDA_R_32I: + compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; + break; + default: + good = false; + break; + } + } +#endif // __CUDACC_VER_MAJOR__ >= 11 + + if (!good) { + status = Status::kErrorNotSupported; + } +} + +/// Executes TRMM using these arguments +cublasStatus_t cublasTrmmDispatcher::operator()(cublasHandle_t handle) { + + if (data_type_A == data_type_D && data_type_A == CUDA_R_64F) { + return cublasDtrmm( + handle, + side, + uplo, + trans_A, + diag, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.D), + int(configuration.ldd) + ); + } else if (data_type_A == data_type_D && data_type_A == CUDA_R_32F) { + +#if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; +#endif + + return cublasStrmm( + handle, + side, + uplo, + trans_A, + diag, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.D), + int(configuration.ldd) + ); + } else if (data_type_A == data_type_D && data_type_A == CUDA_C_64F) { + return cublasZtrmm( + handle, + side, + uplo, + trans_A, + diag, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.D), + int(configuration.ldd) + ); + } else if (data_type_A == data_type_D && data_type_A == CUDA_C_32F) { + +#if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; +#endif + + return cublasCtrmm( + handle, + side, + uplo, + 
trans_A, + diag, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.D), + int(configuration.ldd) + ); + } else { + return CUBLAS_STATUS_NOT_SUPPORTED; + } +} + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns a status if cuBLAS can satisfy a particular Symm description +Status cublas_satisfies(library::SymmDescription const &desc) { + auto const &math_instruction = desc.tile_description.math_instruction; + + if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && + math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { + + return Status::kErrorNotSupported; + } + + // output type S4 and S8 not supported in cuBLAS + if (desc.C.element == library::NumericTypeID::kS4 || + desc.C.element == library::NumericTypeID::kS8) { + + return Status::kErrorNotSupported; + } + + // input type BF16 and TF32 not supported in cuBLAS + if (desc.A.element == library::NumericTypeID::kBF16 || + desc.A.element == library::NumericTypeID::kTF32) { + + return Status::kErrorNotSupported; + } + + // input type BF16 and TF32 not supported in cuBLAS + if (desc.B.element == library::NumericTypeID::kBF16 || + desc.B.element == library::NumericTypeID::kTF32) { + + return Status::kErrorNotSupported; + } + + // only column major layout is supported in cuBLAS + if (desc.A.layout != library::LayoutTypeID::kColumnMajor || + desc.transform_A != library::ComplexTransform::kNone) { + + return Status::kErrorNotSupported; +} + + return Status::kSuccess; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +cublasSymmDispatcher::cublasSymmDispatcher( + library::SymmDescription const &op_desc, + library::SymmConfiguration 
configuration_, + library::SymmArguments arguments_ +): + configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { + + blas_mode = op_desc.blas_mode; + + bool good = true; + + good = (good && get_cublas_side_mode(side, op_desc.side_mode)); + good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); + good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); + good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); + + good = (good && get_cublas_datatype( + compute_data_type, + op_desc.tile_description.math_instruction.element_accumulator)); + + // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe + // internal numerical data types used in the computation. +#if (__CUDACC_VER_MAJOR__ >= 11) + library::OpcodeClassID const & opcode_class = + op_desc.tile_description.math_instruction.opcode_class; + + if (good && + op_desc.A.element == library::NumericTypeID::kF32 && + opcode_class == library::OpcodeClassID::kTensorOp) { + + compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; + } + else if (good) { + bool const isPedantic = false; + switch (compute_data_type) { + case CUDA_R_32F: + case CUDA_C_32F: + compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; + break; + case CUDA_R_64F: + case CUDA_C_64F: + compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; + break; + case CUDA_R_16F: + compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; + break; + case CUDA_R_32I: + compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; + break; + default: + good = false; + break; + } + } +#endif // __CUDACC_VER_MAJOR__ >= 11 + + if (!good) { + status = Status::kErrorNotSupported; + } +} + +/// Executes Symm using these arguments +cublasStatus_t cublasSymmDispatcher::operator()(cublasHandle_t handle) { + + // SYMM and HEMM + if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) { + return cublasDsymm( + handle, + side, + uplo, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) { + +#if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; +#endif + + return cublasSsymm( + handle, + side, + uplo, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) { + + if (blas_mode == BlasMode::kHermitian) { + return cublasZhemm( + handle, + side, + uplo, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + else { + return cublasZsymm( + handle, + side, + uplo, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + 
int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + + } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) { + +#if (__CUDACC_VER_MAJOR__ >= 11) + if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) + return CUBLAS_STATUS_NOT_SUPPORTED; +#endif + + if (blas_mode == BlasMode::kHermitian) { + return cublasChemm( + handle, + side, + uplo, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + else { + return cublasCsymm( + handle, + side, + uplo, + configuration.problem_size.m(), + configuration.problem_size.n(), + static_cast(arguments.alpha), + static_cast(arguments.A), + int(configuration.lda), + static_cast(arguments.B), + int(configuration.ldb), + static_cast(arguments.beta), + static_cast(arguments.D), + int(configuration.ldc) + ); + } + } else { + return CUBLAS_STATUS_NOT_SUPPORTED; + } +} + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +#endif // #if CUTLASS_ENABLE_CUBLAS diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cudnn_helpers.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cudnn_helpers.cpp new file mode 100644 index 0000000000000000000000000000000000000000..254cbaebd22ac2b766e59470e35b73425dcb94f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cudnn_helpers.cpp @@ -0,0 +1,496 @@ +/*************************************************************************************************** + * 
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Helper functions for mapping CUTLASS concepts to cuDNN. 
+*/ +#if CUTLASS_ENABLE_CUDNN + +#include + +#include "cutlass/profiler/cudnn_helpers.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Converts a cuDNN status to cutlass::Status +Status get_cutlass_status(cudnnStatus_t cudnn_status) { + + if (cudnn_status == CUDNN_STATUS_SUCCESS) { + return Status::kSuccess; + } + else if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) { + return Status::kErrorInvalidProblem; + } + if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) { + return Status::kErrorNotSupported; + } + return Status::kErrorInternal; +} + +/// Converts a cuDNN status to cutlass::profiler::Disposition +Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status) { + + if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) { + return Disposition::kInvalidProblem; + } + else if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) { + return Disposition::kNotSupported; + } + return Disposition::kFailed; +} + +/// Checks cudnnStatus_t converts to cutlas status and returns if Status::kSuccess o.w. 
throws exception +Status checkCudnnErr(cudnnStatus_t cudnn_status) { + Status cutlass_status = get_cutlass_status(cudnn_status); + if(cutlass_status != Status::kSuccess) { + throw std::runtime_error("checkCudnnErr failed"); + } + return cutlass_status; +} + +/// Maps a CUTLASS conv mode to a cuDNN cudnnConvolutionMode_t +bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode) { + switch (conv_mode) { + case conv::Mode::kCrossCorrelation: + cudnn_conv_mode = CUDNN_CROSS_CORRELATION; + return true; + case conv::Mode::kConvolution: + cudnn_conv_mode = CUDNN_CONVOLUTION; + return true; + default: break; + } + return false; +} + +/// Maps a CUTLASS tensor layout to a cuDNN cudnnTensorFormat_t +bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout) { + switch (layout) { + // cudnn uses the same enum for TensorNC*HW along nDim (ConvDescription::conv_dim) + case library::LayoutTypeID::kTensorNCHW: + case library::LayoutTypeID::kTensorNCDHW: + cudnn_layout = CUDNN_TENSOR_NCHW; + return true; + case library::LayoutTypeID::kTensorNHWC: + case library::LayoutTypeID::kTensorNDHWC: + cudnn_layout = CUDNN_TENSOR_NHWC; + return true; + default: break; + } + return false; +} + +/// Maps a CUTLASS numeric type to a cuDNN cudnnDataType_t +bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type) { + switch (element_type) { + case library::NumericTypeID::kF16: + cudnn_element_type = CUDNN_DATA_HALF; + return true; + + case library::NumericTypeID::kF32: + cudnn_element_type = CUDNN_DATA_FLOAT; + return true; + + case library::NumericTypeID::kF64: + cudnn_element_type = CUDNN_DATA_DOUBLE; + return true; + + case library::NumericTypeID::kS2: + break; + + case library::NumericTypeID::kS4: + break; + + case library::NumericTypeID::kS8: + cudnn_element_type = CUDNN_DATA_INT8; + return true; + + case library::NumericTypeID::kS16: + break; + + case library::NumericTypeID::kS32: + 
cudnn_element_type = CUDNN_DATA_INT32; + return true; + + case library::NumericTypeID::kS64: + break; + + case library::NumericTypeID::kU2: + break; + + case library::NumericTypeID::kU4: + break; + + case library::NumericTypeID::kU8: + cudnn_element_type = CUDNN_DATA_UINT8; + return true; + + case library::NumericTypeID::kU16: + break; + + case library::NumericTypeID::kU32: + break; + + case library::NumericTypeID::kU64: + break; + + case library::NumericTypeID::kB1: + break; + + case library::NumericTypeID::kInvalid: + + default: + break; + } + + return false; +} + +/// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type +bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc) { + + switch (conv_desc.tile_description.math_instruction.opcode_class) { + + case library::OpcodeClassID::kTensorOp: + { + cudnn_math_type = CUDNN_TENSOR_OP_MATH; + + library::MathOperationID math_op = conv_desc.tile_description.math_instruction.math_operation; + + // Allow conversion on input data type for fast math operations + if (math_op == library::MathOperationID::kMultiplyAddFastF16 || + math_op == library::MathOperationID::kMultiplyAddFastBF16) + { + cudnn_math_type = CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION; + } + + return true; + } + case library::OpcodeClassID::kSimt: + #if (defined(CUDNN_VERSION) && CUDNN_VERSION <= 8000) + cudnn_math_type = CUDNN_DEFAULT_MATH; + #else + cudnn_math_type = CUDNN_FMA_MATH; + #endif + return true; + } + + return false; +} + +/// Cudnn compute type seems to be hardcoded to float (To handle a possible cudnn issue) +float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src) { + + switch (type) { + case library::NumericTypeID::kF16: + { + return float(*(static_cast(src))); + } + case library::NumericTypeID::kF32: + { + return float(*(static_cast(src))); + } + case library::NumericTypeID::kS32: + { + return float(*(static_cast(src))); + } + default: + throw 
std::runtime_error("Data type handled in cast_compute_type_to_float"); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Returns a status if cuDNN can satisfy a particular Conv2d description +Status cudnn_satisfies( + library::ConvDescription const &desc, + library::Conv2dConfiguration const &configuration) { + + auto const &a_tensor = desc.A; + auto const &b_tensor = desc.B; + auto const &c_tensor = desc.C; + auto const &math_instruction = desc.tile_description.math_instruction; + + if(a_tensor.element != b_tensor.element) { + return Status::kErrorInvalidDataType; + } + + //////////////////////// Convolution output dimensions p and q /////////////////////// + // Cutlass convolutions support arbitrary output dimensions and not constrained by // + // input, filter, padding, striding, dilation sizes. // + // cuDNN sets the output dimensions (p, q) using following equations: // + // // + // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // + // where; div_up(a, b) : (a - 1)/b + 1 // + // // + // Before launching cudnn verification or profiling check that output p and q // + // dimensions are cuDNN compliant. // + // // + // If user sets output p and q which do not follow above constraints, cutlass conv, // + // host reference, device reference can run. 
However, cudnn convolution returns // + // "Invalid problem" // + // // + /////////////////////////////////////////////////////////////////////////////////////// + + // check conv output dimension p for cudnn + int cudnn_output_p = + ( + ( + configuration.problem_size.H + + 2 * configuration.problem_size.pad_h - + ((configuration.problem_size.R - 1) * + configuration.problem_size.dilation_h + 1) + ) / + (configuration.problem_size.stride_h) + + 1 + ); + + if (cudnn_output_p != configuration.problem_size.P) { + return Status::kErrorInvalidProblem; + } + + // check conv output dimension q for cudnn + int cudnn_output_q = + ( + ( + configuration.problem_size.W + + 2 * configuration.problem_size.pad_w - + ((configuration.problem_size.S - 1) * + configuration.problem_size.dilation_w + 1) + ) / + (configuration.problem_size.stride_w) + + 1 + ); + + if (cudnn_output_q != configuration.problem_size.Q) { + return Status::kErrorInvalidProblem; + } + ////////////////////////////////////////////////////////////////////////////////////// + + // conv operator with input=FP16, accumulator=FP32, output=FP32 datatype + if (a_tensor.element == library::NumericTypeID::kF16 && + b_tensor.element == library::NumericTypeID::kF16 && + math_instruction.element_accumulator == library::NumericTypeID::kF32 && + c_tensor.element == library::NumericTypeID::kF32 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kBF16 || + b_tensor.element == library::NumericTypeID::kBF16 || + c_tensor.element == library::NumericTypeID::kBF16 + ) { + + return Status::kErrorNotSupported; + } + + // TF32 input not supported in cuDNN + if (a_tensor.element == library::NumericTypeID::kTF32 || + b_tensor.element == library::NumericTypeID::kTF32 || + c_tensor.element == library::NumericTypeID::kTF32 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kS8 || + b_tensor.element == library::NumericTypeID::kS8 || + 
c_tensor.element == library::NumericTypeID::kS8 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kU8 || + b_tensor.element == library::NumericTypeID::kU8 || + c_tensor.element == library::NumericTypeID::kU8 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kS4 || + b_tensor.element == library::NumericTypeID::kS4 || + c_tensor.element == library::NumericTypeID::kS4 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kU4 || + b_tensor.element == library::NumericTypeID::kU4 || + c_tensor.element == library::NumericTypeID::kU4 + ) { + + return Status::kErrorNotSupported; + } + + return Status::kSuccess; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns a status if cuDNN can satisfy a particular Conv3d description +Status cudnn_satisfies( + library::ConvDescription const &desc, + library::Conv3dConfiguration const &configuration) { + + auto const &a_tensor = desc.A; + auto const &b_tensor = desc.B; + auto const &c_tensor = desc.C; + auto const &math_instruction = desc.tile_description.math_instruction; + + if(a_tensor.element != b_tensor.element) { + return Status::kErrorInvalidDataType; + } + + //////////////////////// Convolution output dimensions p and q /////////////////////// + // Cutlass convolutions support arbitrary output dimensions and not constrained by // + // input, filter, padding, striding, dilation sizes. // + // cuDNN sets the output dimensions (p, q) using following equations: // + // // + // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // + // where; div_up(a, b) : (a - 1)/b + 1 // + // // + // Before launching cudnn verification or profiling check that output p and q // + // dimensions are cuDNN compliant. 
// + // // + // If user sets output p and q which do not follow above constraints, cutlass conv, // + // host reference, device reference can run. However, cudnn convolution returns // + // "Invalid problem" // + // // + /////////////////////////////////////////////////////////////////////////////////////// + + // check conv output dimension z for cudnn + int cudnn_output_z = + ( + ( + configuration.problem_size.D + + 2 * configuration.problem_size.pad_d - + ((configuration.problem_size.T - 1) * + configuration.problem_size.dilation_d + 1) + ) / + (configuration.problem_size.stride_d) + + 1 + ); + + if (cudnn_output_z != configuration.problem_size.Z) { + return Status::kErrorInvalidProblem; + } + + // check conv output dimension p for cudnn + int cudnn_output_p = + ( + ( + configuration.problem_size.H + + 2 * configuration.problem_size.pad_h - + ((configuration.problem_size.R - 1) * + configuration.problem_size.dilation_h + 1) + ) / + (configuration.problem_size.stride_h) + + 1 + ); + + if (cudnn_output_p != configuration.problem_size.P) { + return Status::kErrorInvalidProblem; + } + + // check conv output dimension q for cudnn + int cudnn_output_q = + ( + ( + configuration.problem_size.W + + 2 * configuration.problem_size.pad_w - + ((configuration.problem_size.S - 1) * + configuration.problem_size.dilation_w + 1) + ) / + (configuration.problem_size.stride_w) + + 1 + ); + + if (cudnn_output_q != configuration.problem_size.Q) { + return Status::kErrorInvalidProblem; + } + ////////////////////////////////////////////////////////////////////////////////////// + + // conv operator with input, accumulator, output datatype of (hss) are not supported + // in cuDNN + if (a_tensor.element == library::NumericTypeID::kF16 && + b_tensor.element == library::NumericTypeID::kF16 && + math_instruction.element_accumulator == library::NumericTypeID::kF32 && + c_tensor.element == library::NumericTypeID::kF32 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == 
library::NumericTypeID::kBF16 || + b_tensor.element == library::NumericTypeID::kBF16 || + c_tensor.element == library::NumericTypeID::kBF16 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kTF32 || + b_tensor.element == library::NumericTypeID::kTF32 || + c_tensor.element == library::NumericTypeID::kTF32 + ) { + + return Status::kErrorNotSupported; + } + + if (a_tensor.element == library::NumericTypeID::kS8 || + b_tensor.element == library::NumericTypeID::kS8 || + c_tensor.element == library::NumericTypeID::kS8 + ) { + + return Status::kErrorNotSupported; + } + + // S4 not supported in cuDNN + if (a_tensor.element == library::NumericTypeID::kS4 || + b_tensor.element == library::NumericTypeID::kS4 || + c_tensor.element == library::NumericTypeID::kS4 + ) { + + return Status::kErrorNotSupported; + } + + return Status::kSuccess; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cutlass_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cutlass_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..78d74e3b02d909fdb3dae8045877225c6817131d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/cutlass_profiler.cu @@ -0,0 +1,225 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Execution environment +*/ + +#include +#include + +// Profiler includes +#include "cutlass/profiler/cutlass_profiler.h" +#include "cutlass/profiler/gemm_operation_profiler.h" +#include "cutlass/profiler/rank_k_operation_profiler.h" +#include "cutlass/profiler/rank_2k_operation_profiler.h" +#include "cutlass/profiler/trmm_operation_profiler.h" +#include "cutlass/profiler/symm_operation_profiler.h" +#include "cutlass/profiler/conv2d_operation_profiler.h" +#include "cutlass/profiler/conv3d_operation_profiler.h" +#include "cutlass/profiler/sparse_gemm_operation_profiler.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +CutlassProfiler::CutlassProfiler( + Options const &options +): + options_(options) { + + operation_profilers_.emplace_back(new GemmOperationProfiler(options)); + + operation_profilers_.emplace_back(new SparseGemmOperationProfiler(options)); + + operation_profilers_.emplace_back(new Conv2dOperationProfiler(options)); + + operation_profilers_.emplace_back(new Conv3dOperationProfiler(options)); + + operation_profilers_.emplace_back(new RankKOperationProfiler(options)); + + operation_profilers_.emplace_back(new Rank2KOperationProfiler(options)); + + operation_profilers_.emplace_back(new TrmmOperationProfiler(options)); + + operation_profilers_.emplace_back(new SymmOperationProfiler(options)); +} + +CutlassProfiler::~CutlassProfiler() { + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Execute the program +int CutlassProfiler::operator()() { + + if (options_.cmdline.num_naked_args() > 0) { + std::cerr << "Unknown args: \n"; + options_.cmdline.print_naked_args(std::cerr); + 
std::cerr << "\n\n\n"; + + print_usage_(std::cout); + return 1; + } + + if (options_.about.help) { + if (options_.operation_kind == library::OperationKind::kInvalid) { + print_usage_(std::cout); + } + else { + for (auto & profiler : operation_profilers_) { + if (profiler->kind() == options_.operation_kind) { + profiler->print_usage(std::cout); + profiler->print_examples(std::cout); + return 0; + } + } + } + return 0; + } + else if (options_.about.version) { + options_.about.print_version(std::cout); + + std::cout << std::endl; + return 0; + } + else if (options_.about.device_info) { + options_.device.print_device_info(std::cout); + return 0; + } + + if (options_.execution_mode == ExecutionMode::kProfile || + options_.execution_mode == ExecutionMode::kDryRun || + options_.execution_mode == ExecutionMode::kTrace) { + + // Profiles all operations + return profile_(); + } + else if (options_.execution_mode == ExecutionMode::kEnumerate) { + // Enumerates all operations + enumerate_(); + } + return 0; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumerates all operations +void CutlassProfiler::enumerate_() { + +} + +/// Profiles all operations +int CutlassProfiler::profile_() { + + int result = 0; + DeviceContext device_context; + + // For all profilers + for (auto & profiler : operation_profilers_) { + + if (options_.operation_kind == library::OperationKind::kInvalid || + options_.operation_kind == profiler->kind()) { + + result = profiler->profile_all(options_, library::Singleton::get().manifest, device_context); + + if (result) { + return result; + } + } + } + + return result; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Prints all options +void CutlassProfiler::print_usage_(std::ostream &out) { + options_.print_usage(out); + + out << "\nOperations:\n\n"; + + // For all profilers + for (auto & profiler : operation_profilers_) { + + + 
std::string kind_str = library::to_string(profiler->kind()); + + size_t kAlignment = 40; + size_t columns = 0; + + if (kind_str.size() < kAlignment) { + columns = kAlignment - kind_str.size(); + } + + out << " " << kind_str << std::string(columns, ' ') << profiler->description() << "\n"; + + } + + out << "\n\nFor details about a particular function, specify the function name with --help.\n\nExample:\n\n" + << " $ cutlass_profiler --operation=Gemm --help\n\n" + << " $ cutlass_profiler --operation=RankK --help\n\n" + << " $ cutlass_profiler --operation=Trmm --help\n\n" + << " $ cutlass_profiler --operation=Symm --help\n\n" + << " $ cutlass_profiler --operation=Conv3d --help\n\n" + << " $ cutlass_profiler --operation=Conv2d --help\n\n" + << " $ cutlass_profiler --operation=SparseGemm --help\n\n" + ; +} + +/// Prints usage +void CutlassProfiler::print_options_(std::ostream &out) { + options_.print_options(out); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Initializes the CUDA device +void CutlassProfiler::initialize_device_() { + + cudaError_t result = cudaSetDevice(options_.device.device); + + if (result != cudaSuccess) { + std::cerr << "Failed to set device."; + throw std::runtime_error("Failed to set device"); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/device_allocation.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/device_allocation.cu new file mode 100644 index 0000000000000000000000000000000000000000..088358278ad7b84856cbb7d5b55d3ab03a33704d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/device_allocation.cu @@ 
-0,0 +1,2265 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Execution environment +*/ + +#include + +#include "cutlass/numeric_types.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" + +#include "cutlass/util/reference/device/tensor_compare.h" +#include "cutlass/util/reference/device/tensor_fill.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/tensor_view_io.h" + +#include "cutlass/library/util.h" + +#include "cutlass/profiler/device_allocation.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) { + return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +static std::vector get_packed_layout_stride(std::vector const &extent) { + + typename Layout::TensorCoord extent_coord; + typename Layout::Stride stride_coord; + + if (extent.size() != size_t(Layout::kRank)) { + throw std::runtime_error("Layout does not have same rank as extent vector."); + } + + for (int i = 0; i < Layout::kRank; ++i) { + extent_coord[i] = extent.at(i); + } + + std::vector stride; + stride.resize(Layout::kStrideRank, 0); + + Layout layout = Layout::packed(extent_coord); + stride_coord = layout.stride(); + + for (int i = 0; i < Layout::kStrideRank; ++i) { + stride.at(i) = (int64_t)stride_coord[i]; + } + + return stride; +} + +/// Returns the stride of a packed layout +std::vector DeviceAllocation::get_packed_layout( + library::LayoutTypeID layout_id, + std::vector const &extent) { + + std::vector stride; + + switch (layout_id) { + case library::LayoutTypeID::kColumnMajor: + stride = get_packed_layout_stride(extent); + break; + case 
library::LayoutTypeID::kRowMajor: + stride = get_packed_layout_stride(extent); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK2: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kRowMajorInterleavedK2: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK4: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kRowMajorInterleavedK4: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK16: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kRowMajorInterleavedK16: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK32: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kRowMajorInterleavedK32: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK64: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kRowMajorInterleavedK64: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kTensorNCHW: + stride = get_packed_layout_stride(extent); + break; + case library::LayoutTypeID::kTensorNHWC: + stride = get_packed_layout_stride(extent); + break; + case library::LayoutTypeID::kTensorNDHWC: + stride = get_packed_layout_stride(extent); + break; + case library::LayoutTypeID::kTensorNC32HW32: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kTensorNC64HW64: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kTensorC32RSK32: + stride = get_packed_layout_stride>(extent); + break; + case library::LayoutTypeID::kTensorC64RSK64: + stride = get_packed_layout_stride>(extent); + break; + default: break; + } + + return stride; +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Template to use CUTLASS Layout functions to +template +static size_t construct_layout_( + void *bytes, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector &stride) { + + if (extent.size() != Layout::kRank) { + throw std::runtime_error( + "Layout must have same rank as extent vector."); + } + + if (Layout::kStrideRank && stride.empty()) { + + stride = get_packed_layout_stride(extent); + + return construct_layout_( + bytes, + layout_id, + extent, + stride); + } + else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) { + throw std::runtime_error( + "Layout requires either empty stride or stride vector matching Layout::kStrideRank"); + } + + typename Layout::Stride stride_coord; + for (int i = 0; i < Layout::kStrideRank; ++i) { + stride_coord[i] = (int)stride.at(i); + } + + typename Layout::TensorCoord extent_coord; + for (int i = 0; i < Layout::kRank; ++i) { + extent_coord[i] = extent.at(i); + } + + // Construct the CUTLASS layout object from the stride object + Layout layout(stride_coord); + + // Pack it into bytes + if (bytes) { + *reinterpret_cast(bytes) = layout; + } + + // Return capacity + size_t capacity_ = layout.capacity(extent_coord); + + return capacity_; +} + +/// returns the capacity needed +size_t DeviceAllocation::construct_layout( + void *bytes, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector &stride) { + + switch (layout_id) { + case library::LayoutTypeID::kColumnMajor: + return construct_layout_(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kRowMajor: + return construct_layout_(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kColumnMajorInterleavedK2: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kRowMajorInterleavedK2: + return construct_layout_>(bytes, layout_id, extent, stride); + + 
case library::LayoutTypeID::kColumnMajorInterleavedK4: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kRowMajorInterleavedK4: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kColumnMajorInterleavedK16: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kRowMajorInterleavedK16: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kColumnMajorInterleavedK32: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kRowMajorInterleavedK32: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kColumnMajorInterleavedK64: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kRowMajorInterleavedK64: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorNCHW: + return construct_layout_(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorNHWC: + return construct_layout_(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorNDHWC: + return construct_layout_(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorNC32HW32: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorNC64HW64: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorC32RSK32: + return construct_layout_>(bytes, layout_id, extent, stride); + + case library::LayoutTypeID::kTensorC64RSK64: + return construct_layout_>(bytes, layout_id, extent, stride); + + default: break; + } + + return 0; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +DeviceAllocation::DeviceAllocation(): + type_(library::NumericTypeID::kInvalid), + batch_stride_(0), + capacity_(0), + 
pointer_(nullptr), + layout_(library::LayoutTypeID::kUnknown), + batch_count_(1) { + +} + +DeviceAllocation::DeviceAllocation( + library::NumericTypeID type, + size_t capacity +): + type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr), + layout_(library::LayoutTypeID::kUnknown), batch_count_(1) { + + cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity)); + + if (result != cudaSuccess) { + type_ = library::NumericTypeID::kInvalid; + capacity_ = 0; + pointer_ = nullptr; + throw std::bad_alloc(); + } +} + +DeviceAllocation::DeviceAllocation( + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride, + int batch_count +): + type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) { + + reset(type, layout_id, extent, stride, batch_count); +} + +DeviceAllocation::~DeviceAllocation() { + if (pointer_) { + cudaFree(pointer_); + } +} + +DeviceAllocation &DeviceAllocation::reset() { + if (pointer_) { + cudaFree(pointer_); + } + + type_ = library::NumericTypeID::kInvalid; + batch_stride_ = 0; + capacity_ = 0; + pointer_ = nullptr; + layout_ = library::LayoutTypeID::kUnknown; + stride_.clear(); + extent_.clear(); + tensor_ref_buffer_.clear(); + batch_count_ = 1; + + return *this; +} + +DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) { + + reset(); + + type_ = type; + batch_stride_ = capacity; + capacity_ = capacity; + + cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type_, capacity_)); + if (result != cudaSuccess) { + throw std::bad_alloc(); + } + + layout_ = library::LayoutTypeID::kUnknown; + stride_.clear(); + extent_.clear(); + batch_count_ = 1; + + tensor_ref_buffer_.resize(sizeof(pointer_), 0); + std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_)); + + return *this; +} + +/// Allocates memory for a given layout and tensor +DeviceAllocation 
&DeviceAllocation::reset( + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride, + int batch_count) { + + reset(); + + tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0); + + type_ = type; + + layout_ = layout_id; + stride_ = stride; + extent_ = extent; + batch_count_ = batch_count; + + batch_stride_ = construct_layout( + tensor_ref_buffer_.data() + sizeof(pointer_), + layout_id, + extent, + stride_); + + capacity_ = batch_stride_ * batch_count_; + + cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity_)); + if (result != cudaSuccess) { + throw std::bad_alloc(); + } + + std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_)); + + return *this; +} + +bool DeviceAllocation::good() const { + return (capacity_ && pointer_); +} + +library::NumericTypeID DeviceAllocation::type() const { + return type_; +} + +void *DeviceAllocation::data() const { + return pointer_; +} + +void *DeviceAllocation::batch_data(int batch_idx) const { + return static_cast(data()) + batch_stride_bytes() * batch_idx; +} + +library::LayoutTypeID DeviceAllocation::layout() const { + return layout_; +} + +std::vector const & DeviceAllocation::stride() const { + return stride_; +} + +/// Gets the extent vector +std::vector const & DeviceAllocation::extent() const { + return extent_; +} + +/// Gets the number of adjacent tensors in memory +int DeviceAllocation::batch_count() const { + return batch_count_; +} + +/// Gets the stride (in units of elements) between items +int64_t DeviceAllocation::batch_stride() const { + return batch_stride_; +} + +/// Gets the stride (in units of bytes) between items +int64_t DeviceAllocation::batch_stride_bytes() const { + return bytes(type_, batch_stride_); +} + +size_t DeviceAllocation::capacity() const { + return capacity_; +} + +size_t DeviceAllocation::bytes() const { + return bytes(type_, capacity_); +} 
+ +/// Copies from an equivalent-sized tensor in device memory +void DeviceAllocation::copy_from_device(void const *ptr) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping copy of size 0 allocation\n"; +#endif + return; + } + + cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice); + if (result != cudaSuccess) { + throw std::runtime_error("Failed device-to-device copy"); + } +} + +/// Copies from an equivalent-sized tensor in device memory +void DeviceAllocation::copy_from_host(void const *ptr) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping copy of size 0 allocation\n"; +#endif + return; + } + + cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice); + if (result != cudaSuccess) { + throw std::runtime_error("Failed host-to-device copy"); + } +} + +/// Copies from an equivalent-sized tensor in device memory +void DeviceAllocation::copy_to_host(void *ptr) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping copy of size 0 allocation\n"; +#endif + return; + } + + cudaError_t result = cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost); + if (result != cudaSuccess) { + throw std::runtime_error("Failed device-to-host copy"); + } +} + +void DeviceAllocation::initialize_random_device(int seed, Distribution dist) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping initialization of size 0 allocation\n"; +#endif + return; + } + + if (!data()) { + throw std::runtime_error("Attempting to initialize invalid allocation."); + } + + // Instantiate calls to CURAND here. This file takes a long time to compile for + // this reason. 
+ + switch (type_) { + case library::NumericTypeID::kF16: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kBF16: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kTF32: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kF32: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCBF16: + cutlass::reference::device::BlockFillRandom>( + reinterpret_cast *>(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCTF32: + cutlass::reference::device::BlockFillRandom>( + reinterpret_cast *>(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCF32: + cutlass::reference::device::BlockFillRandom>( + reinterpret_cast *>(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kFE4M3: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kFE5M2: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kF64: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCF64: + cutlass::reference::device::BlockFillRandom>( + reinterpret_cast *>(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS2: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS4: + 
cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS8: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS16: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS32: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS64: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kB1: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU2: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU4: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU8: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU16: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU32: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU64: + cutlass::reference::device::BlockFillRandom( + reinterpret_cast(pointer_), + capacity_, + seed, + dist + ); + break; + default: break; + } +} + +void DeviceAllocation::initialize_random_host(int seed, Distribution dist) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping 
initialization of size 0 allocation\n"; +#endif + return; + } + + if (!data()) { + throw std::runtime_error("Attempting to initialize invalid allocation."); + } + + std::vector host_data(bytes()); + + switch (type_) { + case library::NumericTypeID::kFE4M3: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kFE5M2: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kF16: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kBF16: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kTF32: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kF32: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCF16: + cutlass::reference::host::BlockFillRandom>( + reinterpret_cast *>(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCBF16: + cutlass::reference::host::BlockFillRandom>( + reinterpret_cast *>(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCTF32: + cutlass::reference::host::BlockFillRandom>( + reinterpret_cast *>(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kCF32: + cutlass::reference::host::BlockFillRandom>( + reinterpret_cast *>(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kF64: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, 
+ seed, + dist + ); + break; + case library::NumericTypeID::kCF64: + cutlass::reference::host::BlockFillRandom>( + reinterpret_cast *>(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS2: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS4: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS8: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS16: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS32: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kS64: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kB1: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU2: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU4: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU8: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU16: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case 
library::NumericTypeID::kU32: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + case library::NumericTypeID::kU64: + cutlass::reference::host::BlockFillRandom( + reinterpret_cast(host_data.data()), + capacity_, + seed, + dist + ); + break; + default: break; + } + + copy_from_host(host_data.data()); +} + +void DeviceAllocation::initialize_sequential_device(Distribution dist) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping initialization of size 0 allocation\n"; +#endif + return; + } + + if (!data()) { + throw std::runtime_error("Attempting to initialize invalid allocation."); + } + + switch (type_) { + case library::NumericTypeID::kFE4M3: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kFE5M2: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kF16: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kBF16: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kTF32: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kF32: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + 
static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kCF16: + cutlass::reference::device::BlockFillSequential>( + reinterpret_cast *>(pointer_), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kCBF16: + cutlass::reference::device::BlockFillSequential>( + reinterpret_cast *>(pointer_), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kCTF32: + cutlass::reference::device::BlockFillSequential>( + reinterpret_cast *>(pointer_), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kCF32: + cutlass::reference::device::BlockFillSequential>( + reinterpret_cast *>(pointer_), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kF64: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kCF64: + cutlass::reference::device::BlockFillSequential>( + reinterpret_cast *>(pointer_), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kS2: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS4: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + 
static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS8: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS16: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS32: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS64: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kB1: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU2: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU4: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU8: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU16: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + 
static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU32: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU64: + cutlass::reference::device::BlockFillSequential( + reinterpret_cast(pointer_), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + default: break; + } + +} + +void DeviceAllocation::initialize_sequential_host(Distribution dist) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping initialization of size 0 allocation\n"; +#endif + return; + } + + if (!data()) { + throw std::runtime_error("Attempting to initialize invalid allocation."); + } + + std::vector host_data(bytes()); + + switch (type_) { + case library::NumericTypeID::kFE4M3: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kFE5M2: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kF16: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kBF16: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kTF32: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + 
break; + case library::NumericTypeID::kF32: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kCF16: + cutlass::reference::host::BlockFillSequential>( + reinterpret_cast *>(host_data.data()), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kCBF16: + cutlass::reference::host::BlockFillSequential>( + reinterpret_cast *>(host_data.data()), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kCTF32: + cutlass::reference::host::BlockFillSequential>( + reinterpret_cast *>(host_data.data()), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kCF32: + cutlass::reference::host::BlockFillSequential>( + reinterpret_cast *>(host_data.data()), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kF64: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kCF64: + cutlass::reference::host::BlockFillSequential>( + reinterpret_cast *>(host_data.data()), + capacity_, + cutlass::complex( + static_cast(dist.sequential.delta)), + cutlass::complex( + static_cast(dist.sequential.start)) + ); + break; + case library::NumericTypeID::kS2: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), 
+ static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS4: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS8: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS16: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS32: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kS64: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kB1: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU2: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU4: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU8: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + 
static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU16: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU32: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + case library::NumericTypeID::kU64: + cutlass::reference::host::BlockFillSequential( + reinterpret_cast(host_data.data()), + capacity_, + static_cast(dist.sequential.delta), + static_cast(dist.sequential.start) + ); + break; + default: break; + } + + copy_from_host(host_data.data()); +} + +void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping initialization of size 0 allocation\n"; +#endif + return; + } + + if (!data()) { + throw std::runtime_error("Attempting to initialize invalid allocation."); + } + + // Instantiate calls to CURAND here. This file takes a long time to compile for + // this reason. 
+ + switch (type_) { + case library::NumericTypeID::kU16: + cutlass::reference::device::BlockFillRandomSparseMeta( + reinterpret_cast(pointer_), + capacity_, + seed, + MetaSizeInBits + ); + break; + case library::NumericTypeID::kU32: + cutlass::reference::device::BlockFillRandomSparseMeta( + reinterpret_cast(pointer_), + capacity_, + seed, + MetaSizeInBits + ); + break; + default: + break; + } +} + +void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) { + if (!bytes()) { +#ifndef NDEBUG + std::cout << "Skipping initialization of size 0 allocation\n"; +#endif + return; + } + + if (!data()) { + throw std::runtime_error("Attempting to initialize invalid allocation."); + } + + std::vector host_data(bytes()); + + switch (type_) { + case library::NumericTypeID::kS16: + cutlass::reference::host::BlockFillRandomSparseMeta( + reinterpret_cast(host_data.data()), + capacity_, + seed, + MetaSizeInBits + ); + break; + case library::NumericTypeID::kS32: + cutlass::reference::host::BlockFillRandomSparseMeta( + reinterpret_cast(host_data.data()), + capacity_, + seed, + MetaSizeInBits + ); + break; + default: + break; + } + + copy_from_host(host_data.data()); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if two blocks have exactly the same value +bool DeviceAllocation::block_compare_equal( + library::NumericTypeID numeric_type, + void const *ptr_A, + void const *ptr_B, + size_t capacity) { + + switch (numeric_type) { + case library::NumericTypeID::kFE4M3: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kFE5M2: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kF16: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + 
+ case library::NumericTypeID::kBF16: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kTF32: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kF32: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kCF32: + return reference::device::BlockCompareEqual >( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kCF16: + return reference::device::BlockCompareEqual>( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kCBF16: + return reference::device::BlockCompareEqual>( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kCTF32: + return reference::device::BlockCompareEqual>( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kF64: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kCF64: + return reference::device::BlockCompareEqual>( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kS2: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kS4: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kS8: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kS16: + return 
reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kS32: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kS64: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kB1: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kU2: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kU4: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kU8: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kU16: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kU32: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + case library::NumericTypeID::kU64: + return reference::device::BlockCompareEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity); + + default: + throw std::runtime_error("Unsupported numeric type"); + } +} + +/// Returns true if two blocks have approximately the same value +bool DeviceAllocation::block_compare_relatively_equal( + library::NumericTypeID numeric_type, + void const *ptr_A, + void const *ptr_B, + size_t capacity, + double epsilon, + double nonzero_floor) { + + switch (numeric_type) { + case library::NumericTypeID::kFE4M3: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), 
+ capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kFE5M2: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kF16: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kBF16: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kTF32: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kF32: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kF64: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kS2: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kS4: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kS8: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + 
static_cast(nonzero_floor)); + + case library::NumericTypeID::kS16: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kS32: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kS64: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kB1: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kU2: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kU4: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kU8: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kU16: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case library::NumericTypeID::kU32: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + case 
library::NumericTypeID::kU64: + return reference::device::BlockCompareRelativelyEqual( + reinterpret_cast(ptr_A), + reinterpret_cast(ptr_B), + capacity, + static_cast(epsilon), + static_cast(nonzero_floor)); + + // No relatively equal comparison for complex numbers. + // + // As a simplification, we can require bitwise equality. This avoids false positives. + // (i.e. "pass" really means passing. "Fail" may not actually mean failure given appropriate epsilon.) + // + case library::NumericTypeID::kCF16: + return reference::device::BlockCompareEqual >( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kCF32: + return reference::device::BlockCompareEqual >( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + case library::NumericTypeID::kCF64: + return reference::device::BlockCompareEqual >( + reinterpret_cast const *>(ptr_A), + reinterpret_cast const *>(ptr_B), + capacity); + + default: + { + throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type)); + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Permits copying dynamic vectors into static-length vectors +template +struct vector_to_coord { + + vector_to_coord(TensorCoord &coord, std::vector const &vec) { + + coord[Rank - 1] = vec.at(Rank - 1); + + if (Rank > 1) { + vector_to_coord(coord, vec); + } + } + + vector_to_coord(TensorCoord &coord, std::vector const &vec) { + + coord[Rank - 1] = (int)vec.at(Rank - 1); + + if (Rank > 1) { + vector_to_coord(coord, vec); + } + } +}; + +/// Permits copying dynamic vectors into static-length vectors +template +struct vector_to_coord { + + vector_to_coord(TensorCoord &coord, std::vector const &vec) { + + coord[0] = vec.at(0); + } + + vector_to_coord(TensorCoord &coord, std::vector const &vec) { + + coord[0] = (int)vec.at(0); + } +}; + +/// Permits copying dynamic vectors 
into static-length vectors +template +struct vector_to_coord { + + vector_to_coord(TensorCoord &coord, std::vector const &vec) { + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +static void write_tensor_csv_static_tensor_view( + std::ostream &out, + DeviceAllocation &allocation) { + + Coord extent; + Coord stride; + + if (allocation.extent().size() != Layout::kRank) { + throw std::runtime_error("Allocation extent has invalid rank"); + } + + if (allocation.stride().size() != Layout::kStrideRank) { + throw std::runtime_error("Allocation stride has invalid rank"); + } + + vector_to_coord, Layout::kRank>(extent, allocation.extent()); + vector_to_coord, + Layout::kStrideRank>(stride, allocation.stride()); + + Layout layout(stride); + HostTensor host_tensor(extent, layout, false); + + if (host_tensor.capacity() != allocation.batch_stride()) { + throw std::runtime_error("Unexpected capacity to equal."); + } + + host_tensor.copy_in_device_to_host( + static_cast(allocation.data()), + allocation.batch_stride()); + + TensorViewWrite(out, host_tensor.host_view()); + + out << "\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +static void write_tensor_csv_static_type( + std::ostream &out, + DeviceAllocation &allocation) { + + switch (allocation.layout()) { + case library::LayoutTypeID::kRowMajor: + write_tensor_csv_static_tensor_view(out, allocation); + break; + case library::LayoutTypeID::kColumnMajor: + write_tensor_csv_static_tensor_view(out, allocation); + break; + case library::LayoutTypeID::kRowMajorInterleavedK2: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK2: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kRowMajorInterleavedK4: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case 
library::LayoutTypeID::kColumnMajorInterleavedK4: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kRowMajorInterleavedK16: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK16: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kRowMajorInterleavedK32: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK32: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kRowMajorInterleavedK64: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kColumnMajorInterleavedK64: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kTensorNHWC: + write_tensor_csv_static_tensor_view(out, allocation); + break; + case library::LayoutTypeID::kTensorNDHWC: + write_tensor_csv_static_tensor_view(out, allocation); + break; + case library::LayoutTypeID::kTensorNC32HW32: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kTensorNC64HW64: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kTensorC32RSK32: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + case library::LayoutTypeID::kTensorC64RSK64: + write_tensor_csv_static_tensor_view>(out, allocation); + break; + default: + throw std::runtime_error("Unhandled layout"); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Writes a tensor to csv +void DeviceAllocation::write_tensor_csv( + std::ostream &out) { + + switch (this->type()) { + case library::NumericTypeID::kFE4M3: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kFE5M2: + write_tensor_csv_static_type(out, *this); + break; + + 
case library::NumericTypeID::kF16: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kBF16: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kTF32: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kF32: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kF64: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kS2: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kS4: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kS8: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kS16: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kS32: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kS64: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kB1: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kU2: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kU4: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kU8: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kU16: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kU32: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kU64: + write_tensor_csv_static_type(out, *this); + break; + + case library::NumericTypeID::kCF16: + write_tensor_csv_static_type >(out, *this); + break; + + case library::NumericTypeID::kCF32: + write_tensor_csv_static_type >(out, *this); + break; + + case library::NumericTypeID::kCF64: + write_tensor_csv_static_type >(out, *this); + break; + + default: + throw std::runtime_error("Unsupported numeric 
type"); + } +} + +template +static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) { + Coord extent; + Coord stride; + + if (allocation.extent().size() != Layout::kRank) { + throw std::runtime_error("Allocation extent has invalid rank"); + } + + if (allocation.stride().size() != Layout::kStrideRank) { + throw std::runtime_error("Allocation stride has invalid rank"); + } + + vector_to_coord, Layout::kRank>(extent, allocation.extent()); + vector_to_coord, + Layout::kStrideRank>(stride, allocation.stride()); + + TensorView view( + static_cast(allocation.data()), + Layout(stride), + extent + ); + + + cutlass::reference::device::TensorFill( + view, + val + ); +} + +template +static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) { + switch (allocation.layout()) { + case library::LayoutTypeID::kRowMajor: + tensor_fill_tensor_view(allocation, val); + break; + case library::LayoutTypeID::kColumnMajor: + tensor_fill_tensor_view(allocation, val); + break; + case library::LayoutTypeID::kTensorNHWC: + tensor_fill_tensor_view(allocation, val); + break; + case library::LayoutTypeID::kTensorNDHWC: + tensor_fill_tensor_view(allocation, val); + break; + case library::LayoutTypeID::kTensorNC32HW32: + tensor_fill_tensor_view>(allocation, val); + break; + case library::LayoutTypeID::kTensorNC64HW64: + tensor_fill_tensor_view>(allocation, val); + break; + case library::LayoutTypeID::kTensorC32RSK32: + tensor_fill_tensor_view>(allocation, val); + break; + case library::LayoutTypeID::kTensorC64RSK64: + tensor_fill_tensor_view>(allocation, val); + break; + default: + throw std::runtime_error("Unsupported layout"); + break; + } +} + +/// Fills a tensor uniformly with a value (most frequently used to clear the tensor) +void DeviceAllocation::fill(double val = 0.0) { + + switch (this->type()) { + case library::NumericTypeID::kFE4M3: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kFE5M2: + 
tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kF16: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kBF16: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kTF32: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kF32: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kF64: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kS2: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kS4: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kS8: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kS16: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kS32: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kS64: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kB1: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kU2: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kU4: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kU8: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kU16: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kU32: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kU64: + tensor_fill(*this, static_cast(val)); + break; + + case library::NumericTypeID::kCF16: + tensor_fill >(*this, from_real(val)); + break; + + case library::NumericTypeID::kCF32: + tensor_fill >(*this, from_real(val)); + break; + + case library::NumericTypeID::kCF64: + tensor_fill >(*this, from_real(val)); + break; + + default: + throw std::runtime_error("Unsupported numeric type"); + } +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/device_context.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/device_context.cu new file mode 100644 index 0000000000000000000000000000000000000000..280adab11ba41d7a1112dfa2f3ef0048e3f27556 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/device_context.cu @@ -0,0 +1,234 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief +*/ + +#include "cutlass/profiler/device_context.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Allocates memory of a given type, capacity (elements), and name +DeviceAllocation *DeviceContext::allocate_block( + std::string const &name, + library::NumericTypeID type, + size_t capacity) { + + device_memory_.emplace_back(type, capacity); + DeviceAllocation *allocation = &device_memory_.back(); + + allocations_[name] = allocation; + return allocation; +} + +/// Allocates memory of a given type, capacity (elements), and name +DeviceAllocation *DeviceContext::allocate_tensor( + std::string const &name, + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + std::vector const &stride, + int batch_count) { + + device_memory_.emplace_back(type, layout_id, extent, stride, batch_count); + DeviceAllocation *allocation = &device_memory_.back(); + + allocations_[name] = allocation; + return allocation; +} + +/// Allocates memory of a given type, capacity (elements), and name +DeviceAllocation *DeviceContext::allocate_tensor( + Options const &options, + std::string const &name, + library::NumericTypeID type, + library::LayoutTypeID layout_id, + std::vector const &extent, + 
std::vector const &stride, + int batch_count, + int seed_shift) { + + DeviceAllocation *allocation = + allocate_tensor(name, type, layout_id, extent, stride, batch_count); + + if (options.initialization.enabled) { + Distribution data_distribution = options.initialization.data_distribution; + + // check if data distribution is allowed to change + if(!options.initialization.fix_data_distribution) { + // change data distribution based on bit width + switch(type) { + case library::NumericTypeID::kFE4M3: + data_distribution.set_uniform(-1, 1, 0); + break; + case library::NumericTypeID::kFE5M2: + data_distribution.set_uniform(-1, 1, 0); + break; + case library::NumericTypeID::kF16: + data_distribution.set_uniform(-3, 3, 0); + break; + case library::NumericTypeID::kB1: + data_distribution.set_uniform(0, 1, 0); + break; + case library::NumericTypeID::kS2: + data_distribution.set_uniform(-1, 1, 0); + break; + case library::NumericTypeID::kS4: + data_distribution.set_uniform(-2, 2, 0); + break; + case library::NumericTypeID::kU2: + data_distribution.set_uniform(0, 2, 0); + break; + case library::NumericTypeID::kU4: + data_distribution.set_uniform(0, 2, 0); + break; + case library::NumericTypeID::kS8: + data_distribution.set_uniform(-3, 3, 0); + break; + case library::NumericTypeID::kU8: + data_distribution.set_uniform(0, 4, 0); + break; + default: break; + } + } + + // Override pnz for the A/B/C tensors if overridden for Gaussian distributions + if (data_distribution.kind == Distribution::Gaussian) { + double mean = data_distribution.gaussian.mean; + double stddev = data_distribution.gaussian.stddev; + int scale = data_distribution.int_scale; + + if (name == "A" && data_distribution.gaussian.pnzA != 100.0) { + data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzA); + } + else if (name == "B" && data_distribution.gaussian.pnzB != 100.0) { + data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzB); + } + else if (name 
== "C" && data_distribution.gaussian.pnzC != 100.0) { + data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzC); + } + } + + if (options.initialization.provider == library::Provider::kReferenceDevice) { + if (data_distribution.kind == Distribution::Sequential) { + allocation->initialize_sequential_device( + data_distribution); + } + else { + allocation->initialize_random_device( + options.initialization.seed + seed_shift, + data_distribution); + } + } + else if (options.initialization.provider == library::Provider::kReferenceHost) { + if (data_distribution.kind == Distribution::Sequential) { + allocation->initialize_sequential_host( + data_distribution); + } + else { + allocation->initialize_random_host( + options.initialization.seed + seed_shift, + data_distribution); + } + } + } + + return allocation; +} + +/// Allocates memory for sparse meta data +DeviceAllocation *DeviceContext::allocate_sparsemeta_tensor( + Options const &options, + std::string const &name, + library::NumericTypeID type, + library::LayoutTypeID layout_id, + library::NumericTypeID type_a, + std::vector const &extent, + std::vector const &stride, + int batch_count, + int seed_shift) { + + DeviceAllocation *allocation = + allocate_tensor(name, type, layout_id, extent, stride, batch_count); + + if (options.initialization.enabled) { + // TF32 has 4bit meta data. The rest has 2bit. + int MetaSizeInBits = (cutlass::library::sizeof_bits(type_a) == 32) ? 
4 : 2; + + if (options.initialization.provider == library::Provider::kReferenceDevice) { + allocation->initialize_random_sparsemeta_device( + options.initialization.seed + seed_shift, + MetaSizeInBits); + } + else if (options.initialization.provider == library::Provider::kReferenceHost) { + allocation->initialize_random_sparsemeta_host( + options.initialization.seed + seed_shift, + MetaSizeInBits); + } + } + + return allocation; +} +/// Clears named allocations (but does not necessarily free memory) +void DeviceContext::clear() { + allocations_.clear(); +} + +/// Frees all device memory allocations +void DeviceContext::free() { + allocations_.clear(); + device_memory_.clear(); +} + +/// Gets the allocation by name +DeviceAllocation &DeviceContext::at(std::string const &name) { + return *allocations_.at(name); +} + +size_t DeviceContext::size() const { + return allocations_.size(); +} + +DeviceContext::AllocationMap::iterator DeviceContext::begin() { + return allocations_.begin(); +} + +DeviceContext::AllocationMap::iterator DeviceContext::end() { + return allocations_.end(); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/enumerated_types.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/enumerated_types.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4c912bbaf6cce6891dccc970a55c69fb172cc6b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/enumerated_types.cpp @@ -0,0 +1,275 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Provides several functions for filling tensors with data. 
+*/ + +#include "cutlass/profiler/enumerated_types.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + ExecutionMode enumerant; +} +ExecutionMode_enumerants[] = { + {"profile", "Profile", ExecutionMode::kProfile}, + {"dry_run", "Dry run", ExecutionMode::kDryRun}, + {"dry", "dry run", ExecutionMode::kDryRun}, + {"trace", "Trace", ExecutionMode::kTrace}, + {"enumerate", "Enumerate", ExecutionMode::kEnumerate} +}; + +/// Converts a ExecutionMode enumerant to a string +char const *to_string(ExecutionMode mode, bool pretty) { + + for (auto const & possible : ExecutionMode_enumerants) { + if (mode == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? "Invalid" : "invalid"; +} + +/// Parses a ExecutionMode enumerant from a string +template <> +ExecutionMode from_string(std::string const &str) { + + for (auto const & possible : ExecutionMode_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return ExecutionMode::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + AlgorithmMode enumerant; +} +AlgorithmMode_enumerants[] = { + {"matching", "Matching", AlgorithmMode::kMatching}, + {"best", "Best", AlgorithmMode::kBest}, + {"default", "Default", AlgorithmMode::kDefault} +}; + +/// Converts a ExecutionMode enumerant to a string +char const *to_string(AlgorithmMode mode, bool pretty) { + + for (auto const & possible : AlgorithmMode_enumerants) { + if (mode == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Parses a ExecutionMode enumerant from a string +template <> +AlgorithmMode from_string(std::string const &str) { + + for (auto const & possible : AlgorithmMode_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return AlgorithmMode::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + Disposition enumerant; +} +Disposition_enumerants[] = { + {"passed", "Passed", Disposition::kPassed}, + {"failed", "Failed", Disposition::kFailed}, + {"not_run", "Not run", Disposition::kNotRun}, + {"not_verified", "Not verified", Disposition::kNotVerified}, + {"invalid_problem", "Invalid problem", Disposition::kInvalidProblem}, + {"not_supported", "Not supported", Disposition::kNotSupported}, + {"incorrect", "Incorrect", Disposition::kIncorrect} +}; + +/// Converts a Disposition enumerant to a string +char const *to_string(Disposition disposition, bool pretty) { + + for (auto const & possible : Disposition_enumerants) { + if (disposition == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Parses a Disposition enumerant from a string +template <> +Disposition from_string(std::string const &str) { + + for (auto const & possible : Disposition_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return Disposition::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + SaveWorkspace enumerant; +} +SaveWorkspace_enumerants[] = { + {"never", "Never", SaveWorkspace::kNever}, + {"incorrect", "Incorrect", SaveWorkspace::kIncorrect}, + {"always", "Always", SaveWorkspace::kAlways} +}; + +/// Converts a SaveWorkspace enumerant to a string +char const *to_string(SaveWorkspace save_option, bool pretty) { + + for (auto const & possible : SaveWorkspace_enumerants) { + if (save_option == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Parses a SaveWorkspace enumerant from a string +template <> +SaveWorkspace from_string(std::string const &str) { + + for (auto const & possible : SaveWorkspace_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return SaveWorkspace::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +static struct { + char const *text; + char const *pretty; + ArgumentTypeID enumerant; +} +ArgumentTypeID_enumerants[] = { + {"scalar", "Scalar", ArgumentTypeID::kScalar}, + {"int", "Integer", ArgumentTypeID::kInteger}, + {"tensor", "Tensor", ArgumentTypeID::kTensor}, + {"batched_tensor", "BatchedTensor", ArgumentTypeID::kBatchedTensor}, + {"struct", "Struct", ArgumentTypeID::kStructure}, + {"enum", "Enumerated type", ArgumentTypeID::kEnumerated} +}; + +/// Converts a ArgumentTypeID enumerant to a string +char const *to_string(ArgumentTypeID type, bool pretty) { + + for (auto const & possible : ArgumentTypeID_enumerants) { + if (type == possible.enumerant) { + if (pretty) { + return possible.pretty; + } + else { + return possible.text; + } + } + } + + return pretty ? 
"Invalid" : "invalid"; +} + +/// Parses a ArgumentTypeID enumerant from a string +template <> +ArgumentTypeID from_string(std::string const &str) { + + for (auto const & possible : ArgumentTypeID_enumerants) { + if ((str.compare(possible.text) == 0) || + (str.compare(possible.pretty) == 0)) { + return possible.enumerant; + } + } + + return ArgumentTypeID::kInvalid; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/gemm_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/gemm_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..a67118cef417654d18b56bd246eee2671e2caba6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/gemm_operation_profiler.cu @@ -0,0 +1,1246 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/cublas_helpers.h" +#include "cutlass/profiler/gemm_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" +#include "cutlass/library/singleton.h" +#include "cutlass/library/library.h" +#include "cutlass/library/handle.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +GemmOperationProfiler::GemmOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kGemm, + { + {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (universal, gemm, planar_complex, 
planar_complex_array)"}, + {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, + {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, + {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, + {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, + {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D output"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "Variant of split K mode(serial, parallel)"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of GEMMs computed in one batch"}, + {ArgumentTypeID::kEnumerated, {"raster_order", "raster-order"}, "Raster order (heuristic, along_n, along_m)"}, + }, + { library::Provider::kCUBLAS} + ) { + + description_ = " General matrix-matrix product. 
D = alpha * A*B + beta * C"; +} + +/// Destructor +GemmOperationProfiler::~GemmOperationProfiler() { + +} + +/// Prints usage statement for the math function +void GemmOperationProfiler::print_usage(std::ostream &out) const { + out << "GEMM" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void GemmOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size:\n" + << " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32\n\n" + + << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n" + << " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n" + + << "Profile a particular problem size with split K and parallel reduction:\n" + << " $ cutlass_profiler --operation=Gemm --split_k_mode=parallel --split_k_slices=2 --m=1024 --n=1024 --k=128\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=Gemm \\ \n" + << " 
--m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +Status GemmOperationProfiler::GemmProblem::parse( + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + this->mode = library::GemmUniversalMode::kGemm; + + if (!arg_as_int(this->m, "m", problem_space, problem)) { + // default value + this->m = 1024; + } + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->k, "k", problem_space, problem)) { + // default value + this->k = 1024; + } + + if (!arg_as_SplitKModeID(this->split_k_mode, "split_k_mode", problem_space, problem)) { + // default value + this->split_k_mode = library::SplitKMode::kSerial; + } + + this->mode = library::GemmUniversalMode::kGemm; + if (this->split_k_mode == library::SplitKMode::kParallel) { + this->mode = library::GemmUniversalMode::kGemmSplitKParallel; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + this->split_k_slices = 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } else if (this->batch_count > 1) { + this->mode = library::GemmUniversalMode::kBatched; + } + + if 
(!arg_as_RasterOrder(this->raster_order, "raster_order", problem_space, problem)) { + // default value + this->raster_order = library::RasterOrder::kHeuristic; + } + + if (this->split_k_slices > 1 && this->batch_count > 1) { + // At least one of these must be one + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->m), int(this->k)}).front(); + + this->ldb = DeviceAllocation::get_packed_layout( + operation_desc.B.layout, {int(this->k), int(this->n)}).front(); + + this->ldc = DeviceAllocation::get_packed_layout( + operation_desc.C.layout, {int(this->m), int(this->n)}).front(); + + return Status::kSuccess; +} + +/// Total number of bytes loaded +int64_t GemmOperationProfiler::GemmProblem::bytes(library::GemmDescription const &operation_desc) const { + // Input bytes read and Output bytes written for the gemm problem + int64_t bytes = + int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * k + + 
int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k + + int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n; + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n; + } + + bytes *= batch_count; + + return bytes; +} + +/// Total number of flops computed +int64_t GemmOperationProfiler::GemmProblem::flops(library::GemmDescription const &operation_desc) const { + int64_t flops_ = (int64_t(m) * n * k + m * n) * 2 * batch_count; + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddComplexFastF32: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddGaussianComplex: + flops_ *= 3; + break; + + default: break; + } + + return flops_; +} + + +/// Initializes a performance result +void GemmOperationProfiler::GemmProblem::initialize_result( + PerformanceResult &result, + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "B", problem_space, + std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); + + set_argument(result, "C", problem_space, + std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); + + set_argument(result, 
"D", problem_space, + std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout)); + + set_argument(result, "m", problem_space, m); + set_argument(result, "n", problem_space, n); + set_argument(result, "k", problem_space, k); + + set_argument(result, "split_k_mode", problem_space, library::to_string(split_k_mode)); + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", problem_space, batch_count); + set_argument(result, "raster_order", problem_space, library::to_string(raster_order)); + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status GemmOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::GemmDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.gemm_kind != library::GemmKind::kUniversal) { + return Status::kErrorInvalidProblem; + } + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + gemm_workspace_.configuration.mode = problem_.mode; + gemm_workspace_.configuration.problem_size.m() = int(problem_.m); + gemm_workspace_.configuration.problem_size.n() = int(problem_.n); + gemm_workspace_.configuration.problem_size.k() = int(problem_.k); + gemm_workspace_.configuration.lda = problem_.lda; + gemm_workspace_.configuration.ldb = problem_.ldb; + gemm_workspace_.configuration.ldc = problem_.ldc; + 
gemm_workspace_.configuration.ldd = problem_.ldc; + + if (problem_.mode == library::GemmUniversalMode::kBatched) { + gemm_workspace_.configuration.batch_count = problem_.batch_count; + } + else { + gemm_workspace_.configuration.batch_count = problem_.split_k_slices; + } + + gemm_workspace_.arguments.A = nullptr; + gemm_workspace_.arguments.B = nullptr; + gemm_workspace_.arguments.C = nullptr; + gemm_workspace_.arguments.D = nullptr; + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + gemm_workspace_.arguments.raster_order = problem_.raster_order; + + // initialize reduction operation for parallel splitKMode + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + if (!initialize_reduction_configuration_(operation, problem)) { + return Status::kErrorInternal; + } + } + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); +} + +/// Initializes the performance result +void GemmOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + result.bytes = problem_.bytes(operation_desc); + result.flops = problem_.flops(operation_desc); + result.runtime = 0; + +} + +/// Initialize reduction problem dimensions and library::Operation +bool GemmOperationProfiler::initialize_reduction_configuration_( + library::Operation const *operation, + 
ProblemSpace::Problem const &problem) { + library::GemmDescription const &gemm_desc = + static_cast(operation->description()); + + if (!cast_from_double(problem_.alpha_one, gemm_desc.element_epilogue, 1)) { + return false; + } + + if (!cast_from_double(problem_.beta_zero, gemm_desc.element_epilogue, 0)) { + return false; + } + + /// initialize library::ReductionConfiguration + gemm_workspace_.reduction_configuration.problem_size = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn(); + gemm_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); + gemm_workspace_.reduction_configuration.partition_stride = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn().product(); + gemm_workspace_.reduction_configuration.ldw = problem_.ldc; + gemm_workspace_.reduction_configuration.lds = problem_.ldc; + gemm_workspace_.reduction_configuration.ldd = problem_.ldc; + + // find reduction operation + library::ReductionFunctionalKey reduction_key( + library::Provider::kCUTLASS, + gemm_desc.tile_description.math_instruction.element_accumulator, // element workspace + gemm_desc.tile_description.math_instruction.element_accumulator, // element accumulator + gemm_desc.D.element, // element output + gemm_desc.element_epilogue // element compute + ); + + auto reduction_it = library::Singleton::get().operation_table.reduction_operations.find(reduction_key); + + if (reduction_it == library::Singleton::get().operation_table.reduction_operations.end()) { + return false; + } + + // initialize reduction operation required for parallel split-k operator + reduction_op_ = reduction_it->second; + + // reduction operation found and initialized + return true; +} + +/// Initializes workspace +Status GemmOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const 
&problem) { + + library::Operation const* underlying_operation = operation; + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) { + return Status::kErrorNotSupported; + } + } + + library::GemmDescription const &operation_desc = + static_cast(operation->description()); + + // Compute the number of copies of the problem to avoid L2 camping. + if (!options.profiling.workspace_count) { + int64_t bytes = problem_.bytes(operation_desc); + if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { + gemm_workspace_.problem_count = + 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); + } + else { + gemm_workspace_.problem_count = 1; + } + } + else { + gemm_workspace_.problem_count = options.profiling.workspace_count; + } + + bool allocate_device_tensors = options.execution_mode != ExecutionMode::kDryRun; + if (allocate_device_tensors) { + int seed_shift = 0; + gemm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.m), int(problem_.k)}, + {int(problem_.lda)}, + problem_.batch_count * gemm_workspace_.problem_count, + seed_shift++ + ); + + gemm_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + {int(problem_.k), int(problem_.n)}, + {int(problem_.ldb)}, + problem_.batch_count * gemm_workspace_.problem_count, + seed_shift++ + ); + + gemm_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)}, + problem_.batch_count * gemm_workspace_.problem_count, + seed_shift++ + ); + + gemm_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.D.element, + operation_desc.D.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)}, + 
problem_.batch_count * gemm_workspace_.problem_count + ); + + gemm_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.D.element, + operation_desc.D.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)}, + problem_.batch_count * gemm_workspace_.problem_count + ); + } + + if (options.execution_mode != ExecutionMode::kDryRun) { + // NOTE: the leading non-batch strides are duplicated here for 3.0 API kernels + gemm_workspace_.arguments.problem_size = {int(problem_.m), int(problem_.n), int(problem_.k)}; + gemm_workspace_.arguments.batch_count = problem_.batch_count; + gemm_workspace_.arguments.lda = problem_.lda; + gemm_workspace_.arguments.ldb = problem_.ldb; + gemm_workspace_.arguments.ldc = problem_.ldc; + gemm_workspace_.arguments.ldd = problem_.ldc; + gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride(); + gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride(); + gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride(); + gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride(); + + /* Query device SM count to pass onto the kernel as an argument, where needed */ + gemm_workspace_.arguments.sm_count = options.device.properties.multiProcessorCount; + } + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = underlying_operation->get_host_workspace_size(&gemm_workspace_.configuration); + gemm_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = underlying_operation->get_device_workspace_size(&gemm_workspace_.configuration, + &gemm_workspace_.arguments); + gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + status = underlying_operation->initialize( + &gemm_workspace_.configuration, 
+ gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data()); + + if (status != Status::kSuccess) { + return status; + } + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + workspace_size = reduction_op_->get_host_workspace_size(&gemm_workspace_.reduction_configuration); + gemm_workspace_.reduction_host_workspace.resize(workspace_size, 0); + + status = reduction_op_->initialize( + &gemm_workspace_.reduction_configuration, + gemm_workspace_.reduction_host_workspace.data(), + nullptr); + + if (status != Status::kSuccess) { + return status; + } + } + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kGemm; + results_.back().disposition = Disposition::kNotRun; + + for (auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool GemmOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.C->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = 
problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride(); + gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride(); + gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride(); + gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride(); + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data(); + gemm_workspace_.arguments.alpha = problem_.alpha_one.data(); + gemm_workspace_.arguments.beta = problem_.beta_zero.data(); + + gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data(); + gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data(); + gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data(); + gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data(); + gemm_workspace_.reduction_arguments.beta = problem_.beta.data(); + gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; + } + + // + // Run the CUTLASS operation + // + + // initialize gemm underlying operation to handle parallel reduction + library::Operation const * underlying_operation = operation; + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) { + results_.back().disposition = Disposition::kFailed; + return false; + } + } + + results_.back().status = underlying_operation->run( + &gemm_workspace_.arguments, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // Run parallel reduction kernel for parallel split_k_mode + if (problem_.split_k_mode == 
library::SplitKMode::kParallel) { + results_.back().status = reduction_op_->run( + &gemm_workspace_.reduction_arguments, + gemm_workspace_.reduction_host_workspace.data(), + nullptr); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUBLAS + if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { + + // Guard against unsupported cases + auto const & gemm_desc = static_cast(operation->description()); + + if (cublas_satisfies(gemm_desc) == Status::kSuccess) { + + // call cublas verification if supported + verify_with_cublas_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else { + // set verification map for cublas to not supported + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUBLAS + + bool verification_status = verify_with_reference_(options, report, device_context, operation, problem_space, problem); + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for (auto &m : results_.back().verification_map) { + if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if (!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if (is_any_verification_run_passed) { + 
results_.back().disposition = Disposition::kPassed; + } + } + + // if verification.required is set, then return success iff at least one ref-check was run + if (options.verification.required) { + bool did_any_verification_run = false; + for (auto provider : options.verification.providers) { + did_any_verification_run |= (Disposition::kNotRun != results_.back().verification_map[provider]); + } + + if (not did_any_verification_run) { + results_.back().status = Status::kErrorNotSupported; + return false; + } + } + + // Return true means continue profiling + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool GemmOperationProfiler::verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + +#if CUTLASS_ENABLE_CUBLAS + + library::GemmDescription const &gemm_desc = + static_cast(operation->description()); + + // + // Construct cuBLAS operators + // + + CublasCreate handle; + cublasStatus_t status = handle.get_cublas_create_status(); + + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status); + return true; + } + + std::vector algorithms; + + detail::select_cublas_algorithms( + algorithms, + options, + gemm_desc); + + if (algorithms.empty()) { + // no algorithm selected + return true; + } + + // + // Initialize state + // + + try { + + // + // Construct dispatcher to cublasGemmEx() + // + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.batch_stride_B = 
gemm_workspace_.B->batch_stride(); + gemm_workspace_.arguments.C = gemm_workspace_.Reference->data(); + gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.Reference->batch_stride(); + gemm_workspace_.arguments.D = gemm_workspace_.Reference->data(); + gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Reference->batch_stride(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + detail::cublasGemmExDispatcher gemm_op( + gemm_desc, + gemm_workspace_.configuration, + gemm_workspace_.arguments, + algorithms.front() + ); + + if (gemm_op.status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; + return true; + } + + results_.back().status = Status::kSuccess; + + status = gemm_op(handle); + + // Handle errors + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status); + return true; + } + + // + // Verify results + // + + results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( + options, + *gemm_workspace_.Computed, + *gemm_workspace_.Reference, + gemm_workspace_.Computed->batch_stride() + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + gemm_desc, + library::Provider::kCUTLASS, + library::Provider::kCUBLAS); + } + } + catch (...) 
{ + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + } + +#endif + + // Return true means continue profiling + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against host and device references +bool GemmOperationProfiler::verify_with_reference_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::GemmDescription const &gemm_desc = + static_cast(operation->description()); + + // + // Initialize state + // + + for (auto provider : options.verification.providers) { + + // Skip providers that are not enabled + if (!options.verification.provider_enabled(provider)) { + continue; + } + + void *ptr_A = gemm_workspace_.A->data(); + void *ptr_B = gemm_workspace_.B->data(); + void *ptr_C = gemm_workspace_.C->data(); + void *ptr_D = gemm_workspace_.Reference->data(); + + // To support the host-side reference, conditionally allocate and + // copy tensors to host memory. 
+ std::vector host_data_A; + std::vector host_data_B; + std::vector host_data_C; + std::vector host_data_D; + + if (provider == library::Provider::kReferenceHost) { + + host_data_A.resize(gemm_workspace_.A->bytes()); + ptr_A = host_data_A.data(); + gemm_workspace_.A->copy_to_host(ptr_A); + + host_data_B.resize(gemm_workspace_.B->bytes()); + ptr_B = host_data_B.data(); + gemm_workspace_.B->copy_to_host(ptr_B); + + host_data_C.resize(gemm_workspace_.C->bytes()); + ptr_C = host_data_C.data(); + gemm_workspace_.C->copy_to_host(ptr_C); + + host_data_D.resize(gemm_workspace_.Reference->bytes()); + ptr_D = host_data_D.data(); + } + + // + // Launch + // + + library::Handle handle; + + handle.set_provider(provider); + + Status status = handle.gemm_universal( + problem_.mode, + gemm_workspace_.configuration.problem_size.m(), + gemm_workspace_.configuration.problem_size.n(), + gemm_workspace_.configuration.problem_size.k(), + gemm_desc.tile_description.math_instruction.element_accumulator, + gemm_desc.element_epilogue, + + problem_.alpha.data(), + + gemm_desc.A.element, + gemm_desc.A.layout, + gemm_desc.transform_A, + ptr_A, + int(gemm_workspace_.configuration.lda), + + gemm_desc.B.element, + gemm_desc.B.layout, + gemm_desc.transform_B, + ptr_B, + int(gemm_workspace_.configuration.ldb), + + problem_.beta.data(), + + gemm_desc.C.element, + gemm_desc.C.layout, + ptr_C, + int(gemm_workspace_.configuration.ldc), + + gemm_desc.D.element, + gemm_desc.D.layout, + ptr_D, + int(gemm_workspace_.configuration.ldd), + + gemm_workspace_.configuration.batch_count, + gemm_workspace_.A->batch_stride(), + gemm_workspace_.B->batch_stride(), + gemm_workspace_.C->batch_stride(), + gemm_workspace_.Reference->batch_stride()); + + if (status != Status::kSuccess) { + results_.back().verification_map[provider] = Disposition::kNotRun; + continue; + } + + results_.back().status = status; + + if (provider == library::Provider::kReferenceHost) { + gemm_workspace_.Reference->copy_from_host(ptr_D); + } + 
+ // + // Verify results + // + + results_.back().verification_map[provider] = compare_tensors( + options, + *gemm_workspace_.Computed, + *gemm_workspace_.Reference, + gemm_workspace_.Computed->batch_stride() + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[provider] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + gemm_desc, + library::Provider::kCUTLASS, + provider); + } + } + + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool GemmOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.C->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride(); + gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride(); + gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride(); + gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride(); + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data(); + gemm_workspace_.arguments.alpha = 
problem_.alpha_one.data(); + gemm_workspace_.arguments.beta = problem_.beta_zero.data(); + + gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data(); + gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data(); + gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data(); + gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data(); + gemm_workspace_.reduction_arguments.beta = problem_.beta.data(); + gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; + } + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &gemm_workspace_.arguments, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data() + ); + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Method to profile a CUTLASS Operation +Status GemmOperationProfiler::profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void *host_workspace, + void *device_workspace) { + + GpuTimer timer; + // initialize gemm underlying operation to handle parallel reduction + library::Operation const * underlying_operation = operation; + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) { + return Status::kErrorNotSupported; + } + } + + // + // Optional sleep to limit power consumption and thermals + // + + sleep(options.profiling.sleep_duration); + + // + // Warmup loop + // + + Status status; + + for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { + + int problem_idx = (iteration % gemm_workspace_.problem_count) * problem_.batch_count; + + gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx); + gemm_workspace_.arguments.B = 
gemm_workspace_.B->batch_data(problem_idx); + gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx); + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data(); + + gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data(); + gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx); + gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx); + } + + // Execute the CUTLASS operation + status = underlying_operation->run( + &gemm_workspace_.arguments, + host_workspace, + device_workspace); + + if (status != Status::kSuccess) { + return status; + } + + // Run parallel reduction kernel for parallel split_k_mode + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + status = reduction_op_->run( + &gemm_workspace_.reduction_arguments, + gemm_workspace_.reduction_host_workspace.data(), + nullptr); + + if (status != Status::kSuccess) { + return status; + } + } + } + + // + // Initialize GPU timer + // + + timer.start(); + + // + // Profiling loop + // + + int Iterations = options.profiling.iterations; + + int iteration = 0; + for (; iteration < Iterations; ++iteration) { + + // Iterate over copies of the problem in memory + int workspace_idx = options.profiling.warmup_iterations + iteration; + int problem_idx = (workspace_idx % gemm_workspace_.problem_count) * problem_.batch_count; + + gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx); + gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx); + gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx); + + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + 
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data(); + + gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data(); + gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx); + gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx); + } + + status = underlying_operation->run( + arguments, + host_workspace, + device_workspace); + + if (status != Status::kSuccess) { + return status; + } + + // Run parallel reduction kernel for parallel split_k_mode + if (problem_.split_k_mode == library::SplitKMode::kParallel) { + status = reduction_op_->run( + &gemm_workspace_.reduction_arguments, + gemm_workspace_.reduction_host_workspace.data(), + nullptr); + + if (status != Status::kSuccess) { + return status; + } + } + } + + // + // Wait for completion + // + + timer.stop_and_wait(); + // + // Update performance result + // + + runtime = timer.duration(iteration); + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/gpu_timer.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/gpu_timer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..67d218f8ce1bca4a01d1fc0790007909c7b0199a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/gpu_timer.cpp @@ -0,0 +1,113 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines a math function +*/ + +#include + +#include "cutlass/profiler/gpu_timer.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +GpuTimer::GpuTimer() { + cudaError_t result; + + for (auto & event : events) { + result = cudaEventCreate(&event); + if (result != cudaSuccess) { + throw std::runtime_error("Failed to create CUDA event"); + } + } +} + +GpuTimer::~GpuTimer() { + for (auto & event : events) { + cudaEventDestroy(event); + } +} + +/// Records a start event in the stream +void GpuTimer::start(cudaStream_t stream) { + cudaError_t result = cudaEventRecord(events[0], stream); + if (result != cudaSuccess) { + throw std::runtime_error("Failed to record start event."); + } +} + +/// Records a stop event in the stream +void GpuTimer::stop(cudaStream_t stream) { +cudaError_t result = cudaEventRecord(events[1], stream); + if (result != cudaSuccess) { + throw std::runtime_error("Failed to record stop event."); + } +} + +/// Records a stop event in the stream and synchronizes on the stream +void GpuTimer::stop_and_wait(cudaStream_t stream) { + + stop(stream); + + cudaError_t result; + if (stream) { + result = cudaStreamSynchronize(stream); + if (result != cudaSuccess) { + throw std::runtime_error("Failed to synchronize with non-null CUDA stream."); + } + } + else { + result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + throw std::runtime_error("Failed to synchronize with CUDA device."); + } + } +} + +/// Returns the duration in milliseconds +double GpuTimer::duration(int iterations) const { + + float avg_ms; + + cudaError_t result = cudaEventElapsedTime(&avg_ms, events[0], events[1]); + if (result != cudaSuccess) { + throw std::runtime_error("Failed to query elapsed time from CUDA events."); + } + + return double(avg_ms) / 
double(iterations); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/main.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..79eae7e9696303384e898739fa600e88f3bd0e3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/main.cpp @@ -0,0 +1,53 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief +*/ + +#include + +#include "cutlass/profiler/options.h" + +#include "cutlass/profiler/cutlass_profiler.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +int main(int argc, char const *arg[]) { + + cutlass::CommandLine cmdline(argc, arg); + cutlass::profiler::Options options(cmdline); + + cutlass::profiler::CutlassProfiler profiler(options); + + return profiler(); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..c9634b2bdfc3993f7c2b7e0bf879ed9491030a35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/operation_profiler.cu @@ -0,0 +1,707 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines a math function +*/ + +#include +#include +#include +#include +#include +#include + +#ifdef __unix__ +#include +#elif defined(_WIN32) || defined(WIN32) +#include +#else +// sleep not supported +#endif + +#include "cutlass/profiler/options.h" +#include "cutlass/profiler/operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +OperationProfiler::OperationProfiler(): kind_(library::OperationKind::kInvalid) { } + +/// Ctor +OperationProfiler::OperationProfiler( + Options const &options, + library::OperationKind kind, + ArgumentDescriptionVector const &arguments, + ProviderVector const & verification_providers +): + kind_(kind), arguments_(arguments) { + + ArgumentDescriptionVector tile_description_arguments{ + {ArgumentTypeID::kEnumerated, {"op_class", "opcode-class"}, "Class of math instruction (simt, tensorop, wmmatensorop, wmma)"}, + {ArgumentTypeID::kEnumerated, {"accum", "accumulator-type"}, "Math instruction accumulator data type"}, + {ArgumentTypeID::kInteger, {"cta_m", "threadblock-shape::m"}, "Threadblock shape in the M dimension"}, + {ArgumentTypeID::kInteger, {"cta_n", "threadblock-shape::n"}, "Threadblock shape in the N dimension"}, + {ArgumentTypeID::kInteger, {"cta_k", "threadblock-shape::k"}, "Threadblock shape in the K dimension"}, + {ArgumentTypeID::kInteger, {"cluster_m", "cluster-shape::m"}, "Cluster shape in the M dimension"}, + {ArgumentTypeID::kInteger, {"cluster_n", "cluster-shape::n"}, "Cluster shape in the N dimension"}, + {ArgumentTypeID::kInteger, {"cluster_k", "cluster-shape::k"}, "Cluster shape in the K dimension"}, + {ArgumentTypeID::kInteger, {"stages", 
"threadblock-stages"}, "Number of stages of threadblock-scoped matrix multiply"}, + {ArgumentTypeID::kInteger, {"warps_m", "warp-count::m"}, "Number of warps within threadblock along the M dimension"}, + {ArgumentTypeID::kInteger, {"warps_n", "warp-count::n"}, "Number of warps within threadblock along the N dimension"}, + {ArgumentTypeID::kInteger, {"warps_k", "warp-count::k"}, "Number of warps within threadblock along the K dimension"}, + {ArgumentTypeID::kInteger, {"inst_m", "instruction-shape::m"}, "Math instruction shape in the M dimension"}, + {ArgumentTypeID::kInteger, {"inst_n", "instruction-shape::n"}, "Math instruction shape in the N dimension"}, + {ArgumentTypeID::kInteger, {"inst_k", "instruction-shape::k"}, "Math instruction shape in the K dimension"}, + {ArgumentTypeID::kInteger, {"min_cc", "minimum-compute-capability"}, "Minimum device compute capability"}, + {ArgumentTypeID::kInteger, {"max_cc", "maximum-compute-capability"}, "Maximum device compute capability"} + }; + + arguments_.insert(arguments_.end(), tile_description_arguments.begin(), tile_description_arguments.end()); + + for (auto provider : verification_providers) { + if (std::find( + options.verification.providers.begin(), + options.verification.providers.end(), + provider) != options.verification.providers.end()) { + + verification_providers_.push_back(provider); + } + } + +} + +/// Destructor +OperationProfiler::~OperationProfiler() {} + +/// Gets the schema description +std::string const & OperationProfiler::description() const { + return description_; +} + +/// Prints usage statement for the math function +void OperationProfiler::print_usage(std::ostream &out) const { + for (auto const & desc : arguments_) { + + size_t const kAliasStart = 10; + + size_t columns = 0; + + std::string type_str = to_string(desc.type); + columns += type_str.size(); + + out << " [" << type_str << "]"; + + if (columns < kAliasStart) { + out << std::string(kAliasStart - columns, ' '); + } + + columns = 0; + + 
int j = 0; + for (auto const & alias : desc.aliases) { + columns += alias.size() + (j ? 1 : 0) + 2; + + out << (j++ ? "," : "") << "--" << alias; + } + + size_t const kTotalColumns = 50; + + if (columns < kTotalColumns) { + out << std::string(kTotalColumns - columns, ' '); + } + + out << desc.description << "\n"; + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if the current operation description satisfies the problem space +bool OperationProfiler::satisfies( + library::OperationDescription const &op_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::OpcodeClassID opcode_class; + if (arg_as_OpcodeClassID(opcode_class, "op_class", problem_space, problem)) { + if (opcode_class != op_desc.tile_description.math_instruction.opcode_class) { + return false; + } + } + + int64_t int_value; + + if (arg_as_int(int_value, "inst_m", problem_space, problem)) { + if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.m()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "inst_n", problem_space, problem)) { + if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.n()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "inst_k", problem_space, problem)) { + if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.k()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "cta_m", problem_space, problem)) { + if (int64_t(op_desc.tile_description.threadblock_shape.m()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "cta_n", problem_space, problem)) { + if (int64_t(op_desc.tile_description.threadblock_shape.n()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "cta_k", problem_space, problem)) { + if (int64_t(op_desc.tile_description.threadblock_shape.k()) != int_value) { + return false; + } + } + + if 
(arg_as_int(int_value, "cluster_m", problem_space, problem)) { + if (int64_t(op_desc.tile_description.cluster_shape.m()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "cluster_n", problem_space, problem)) { + if (int64_t(op_desc.tile_description.cluster_shape.n()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "cluster_k", problem_space, problem)) { + if (int64_t(op_desc.tile_description.cluster_shape.k()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "stages", problem_space, problem)) { + if (int64_t(op_desc.tile_description.threadblock_stages) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "warps_m", problem_space, problem)) { + if (int64_t(op_desc.tile_description.warp_count.m()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "warps_n", problem_space, problem)) { + if (int64_t(op_desc.tile_description.warp_count.n()) != int_value) { + return false; + } + } + + if (arg_as_int(int_value, "warps_k", problem_space, problem)) { + if (int64_t(op_desc.tile_description.warp_count.k()) != int_value) { + return false; + } + } + + library::NumericTypeID numeric_type; + if (arg_as_NumericTypeID(numeric_type, "accum", problem_space, problem)) { + if (numeric_type != op_desc.tile_description.math_instruction.element_accumulator) { + return false; + } + } + + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Entry point to profile all operations in the manifest +int OperationProfiler::profile_all( + Options const &options, + library::Manifest const &manifest, + DeviceContext &device_context) { + + ProblemSpace problem_space(arguments_, options.cmdline); + + // 1. Construct performance report + PerformanceReport report(options, problem_space.argument_names(), kind_); + + // 2. 
For each problem in problem space + ProblemSpace::Iterator problem_it = problem_space.begin(); + ProblemSpace::Iterator problem_end = problem_space.end(); + + bool continue_profiling = true; + int retval = 0; + + // For each problem in problem space + for (; continue_profiling && problem_it != problem_end; ++problem_it) { + ProblemSpace::Problem problem = problem_it.at(); + report.next_problem(); + + // For each operation in manifest + int matched_operation_count = 0; + for (auto const& operation_ptr : manifest) { + + library::Operation const *operation = operation_ptr.get(); + + auto min_cc = operation->description().tile_description.minimum_compute_capability; + auto max_cc = operation->description().tile_description.maximum_compute_capability; + + // Clear named allocations + device_context.free(); + + // Execute compatible cutlass operations if they satisfy the current device's compute capability + if (operation->description().kind == kind_ && + operation->description().provider == library::Provider::kCUTLASS && + options.device.compute_capability() >= min_cc && + options.device.compute_capability() <= max_cc) { + + std::string operation_name(operation->description().name); + + // Filter kernels by name + bool filtered_by_name = options.operation_names.empty(); + if (!filtered_by_name) { + + for (auto const & op_name : options.operation_names) { + if (find_string_matches_(op_name, operation_name)) { + filtered_by_name = true; + break; + } + } + } + + for (auto const & op_name : options.excluded_operation_names) { + if (find_string_matches_(op_name, operation_name)) { + filtered_by_name = false; + break; + } + } + + if (!filtered_by_name || !satisfies(operation->description(), problem_space, problem)) { + continue; + } + + // we have found a kernel match, so increment the counter for match kernels + ++matched_operation_count; + + // A. 
Initialize configuration + Status status = this->initialize_configuration( + options, + report, + device_context, + operation, + problem_space, + problem); + + if (status == Status::kErrorInternal) { + + // If there was an internal error, consume the CUDA error and move to the next operation. + (void)cudaGetLastError(); + + report.append_results(results_); + continue; + } + else if (status != Status::kSuccess) { + // If the workspace could not be initialized for any other reason, continue to + // the next operation. + continue; + } + + if (continue_profiling) { + + if (options.report.print_kernel_before_running) { + std::cout << "Profiling kernel for JUnit test " << options.report.junit_output_path << ": " + << operation_name << std::endl; + } + + status = this->initialize_workspace( + options, + report, + device_context, + operation, + problem_space, + problem); + + if (status == Status::kErrorInternal) { + + // If there was an internal error, consume the CUDA error and move to the next operation. + (void)cudaGetLastError(); + + report.append_results(results_); + continue; + } + else if (status != Status::kSuccess) { + // If the workspace could not be initialized for any other reason, continue to + // the next operation. + continue; + } + } + + // + // Profile CUTLASS if it is enabled + // + + // B. Verify CUTLASS + if (continue_profiling && options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + continue_profiling = this->verify_cutlass( + options, + report, + device_context, + operation, + problem_space, + problem); + + retval |= (not continue_profiling); + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + report.append_results(results_); + results_.clear(); + continue; + } + + // + // C. Optionally save workspace + // + + if (options.verification.save_workspace == SaveWorkspace::kAlways) { + save_workspace( + device_context, + options, + operation->description(), + library::Provider::kCUTLASS); + } + + // + // D. 
Profile + // + + if (continue_profiling && options.profiling.enabled) { + + continue_profiling = this->profile( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + report.append_results(results_); + results_.clear(); + } + + if (!continue_profiling) { + break; + } + } + + // If we did not find any kernels that match our filters and error_on_no_match was set, report an error + if (options.profiling.error_on_no_match && matched_operation_count <= 0) { + #if !NDEBUG + std::cout << "Error: No matching kernels found with kernel selection filters [--error_on_no_match]" << std::endl; + #endif + retval = 1; + } + } + + return retval; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Sleep for a given duration in ms +void OperationProfiler::sleep(int sleep_duration) { + if (sleep_duration) { + #ifdef __unix__ + usleep(sleep_duration * 1000); + #elif defined(_WIN32) || defined(WIN32) + SleepEx(sleep_duration, false); + #else + // sleep not supported + #endif + } +} + + +/// Compares tensors for equality +Disposition OperationProfiler::compare_tensors( + Options const &options, + DeviceAllocation &experimental, + DeviceAllocation &reference, + int64_t count) { + + if (experimental.type() != reference.type()) { + return Disposition::kIncorrect; + } + + bool passed = false; + + if (count == 0) { + count = reference.capacity(); + } + + if (options.verification.epsilon == 0) { + + // bit-level equality + passed = DeviceAllocation::block_compare_equal( + experimental.type(), + experimental.data(), + reference.data(), + count); + } + else { + + // relative error function + passed = DeviceAllocation::block_compare_relatively_equal( + experimental.type(), + experimental.data(), + reference.data(), + count, + options.verification.epsilon, + options.verification.nonzero_floor); + } + + return passed ? 
Disposition::kPassed : Disposition::kIncorrect; +} + +/// Saves the workspace +void OperationProfiler::save_workspace( + DeviceContext &device_context, + Options const &options, + library::OperationDescription const &desc, + library::Provider provider, + library::Provider verification_provider) { + + for (auto const & named_allocation : device_context) { + + DeviceAllocation *allocation = named_allocation.second; + + std::stringstream filename; + + filename << desc.name << "_" << library::to_string(provider) << "_"; + + if (verification_provider != library::Provider::kInvalid) { + filename << "verified_by_" << library::to_string(verification_provider) << "_"; + } + + filename << named_allocation.first + ".mat"; + + std::ofstream out(filename.str()); + + allocation->write_tensor_csv(out); + out << "\n"; + + if (options.report.verbose) { + std::cout << "wrote '" << filename.str() << "'" << std::endl; + } + } +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Method to profile a CUTLASS Operation +Status OperationProfiler::profile_cutlass_( + double &runtime, + Options const &options, + library::Operation const *operation, + void *arguments, + void *host_workspace, + void *device_workspace) { + + GpuTimer timer; + + // + // Optional sleep to limit power consumption and thermals + // + + sleep(options.profiling.sleep_duration); + + // + // Warmup loop + // + + Status status; + + for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { + + status = operation->run( + arguments, + host_workspace, + device_workspace); + + if (status != Status::kSuccess) { + return status; + } + } + + // + // Initialize GPU timer + // + + timer.start(); + + // + // Profiling loop + // + + int Iterations = options.profiling.iterations; + + int iteration = 0; + for (; iteration < Iterations; ++iteration) { + + status = operation->run( + arguments, + host_workspace, + device_workspace); + + if 
(status != Status::kSuccess) { + return status; + } + } + + // + // Wait for completion + // + + timer.stop_and_wait(); + + // + // Update performance result + // + + runtime = timer.duration(iteration); + + return status; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Sets operation description +void OperationProfiler::initialize_result_( + PerformanceResult &result, + library::OperationDescription const &operation_desc, + ProblemSpace const &problem_space) { + + set_argument(result, "op_class", problem_space, + library::to_string(operation_desc.tile_description.math_instruction.opcode_class)); + + set_argument(result, "accum", problem_space, + library::to_string(operation_desc.tile_description.math_instruction.element_accumulator)); + + set_argument(result, "cta_m", problem_space, operation_desc.tile_description.threadblock_shape.m()); + set_argument(result, "cta_n", problem_space, operation_desc.tile_description.threadblock_shape.n()); + set_argument(result, "cta_k", problem_space, operation_desc.tile_description.threadblock_shape.k()); + set_argument(result, "cluster_m", problem_space, operation_desc.tile_description.cluster_shape.m()); + set_argument(result, "cluster_n", problem_space, operation_desc.tile_description.cluster_shape.n()); + set_argument(result, "cluster_k", problem_space, operation_desc.tile_description.cluster_shape.k()); + set_argument(result, "stages", problem_space, operation_desc.tile_description.threadblock_stages); + set_argument(result, "warps_m", problem_space, operation_desc.tile_description.warp_count.m()); + set_argument(result, "warps_n", problem_space, operation_desc.tile_description.warp_count.n()); + set_argument(result, "warps_k", problem_space, operation_desc.tile_description.warp_count.k()); + set_argument(result, "inst_m", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.m()); + set_argument(result, "inst_n", problem_space, 
operation_desc.tile_description.math_instruction.instruction_shape.n()); + set_argument(result, "inst_k", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.k()); + set_argument(result, "min_cc", problem_space, operation_desc.tile_description.minimum_compute_capability); + set_argument(result, "max_cc", problem_space, operation_desc.tile_description.maximum_compute_capability); +} + +/// Helper +void OperationProfiler::set_argument( + PerformanceResult &result, + char const *name, + ProblemSpace const &problem_space, + std::string const &value) { + + result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), value); +} + +void OperationProfiler::set_argument( + PerformanceResult &result, + char const *name, + ProblemSpace const &problem_space, + int64_t value) { + + result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), library::lexical_cast(value)); +} + + +/// finds string matches filter_string in operation_name +bool OperationProfiler::find_string_matches_( + std::string const &filter_string, + std::string const &operation_name) { + // Returns true if all substrings appear in the operation_name in order + + // Split filter_string of the format "gemm*f32*nt" to tokens ["gemm", "f32", "nt"] + std::string item; + std::istringstream iss(filter_string); + std::vector filter_tokens; + while (std::getline(iss, item, '*')) { + filter_tokens.push_back(item); + } + + // Search filter_tokens in operation_name in order + size_t start = 0, idx = 0; + for (auto & token : filter_tokens) { + // Check if characters left to be parsed in operation_name + if (start < operation_name.length()) { + // Find token in operation_name[start:] + idx = operation_name.substr(start).find(token); + if (idx == std::string::npos) { + return false; + } + } + start += (idx + token.length()); + } + + // All tokens in filter_string found in operation_name + return true; +} + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/options.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/options.cu new file mode 100644 index 0000000000000000000000000000000000000000..6f714434f0cbf8d00237f68bbf8c019b77e3dc74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/options.cu @@ -0,0 +1,837 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Command line options for performance test program +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/version.h" + +#include "cutlass/library/util.h" + +#include "cutlass/profiler/options.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Newline and indent for help strings +static char const *end_of_line = "\n "; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Device::Device(cutlass::CommandLine const &cmdline) { + + cmdline.get_cmd_line_argument("device", device, 0); + + cudaError_t result; + result = cudaGetDeviceProperties(&properties, device); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties() failed for given device"); + } + + result = cudaSetDevice(device); + if (result != cudaSuccess) { + throw std::runtime_error("cudaSetDevice() failed for given device."); + } + + // Permit overriding the compute capability + if (cmdline.check_cmd_line_flag("compute-capability")) { + int cc = compute_capability(); + cmdline.get_cmd_line_argument("compute-capability", cc, cc); + properties.major = cc / 10; 
+ properties.minor = cc % 10; + } + + // Permit overriding the L2 cache capacity + if (cmdline.check_cmd_line_flag("llc-capacity")) { + int llc_capacity = 0; + cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0); + + if (llc_capacity >= 0) { + properties.l2CacheSize = (llc_capacity << 10); + } + } + +} + +void Options::Device::print_usage(std::ostream &out) const { + + out << "Device:\n" + << " --device= " + << " CUDA Device ID\n\n"; + + int device_count = 0; + cudaError_t result = cudaGetDeviceCount(&device_count); + + if (result != cudaSuccess) { + out << " \n"; + } + else { + + for (int idx = 0; idx < device_count; ++idx) { + cudaDeviceProp prop; + result = cudaGetDeviceProperties(&prop, idx); + if (result != cudaSuccess) { + out << " " << std::endl; + break; + } + else { + out << " [" << idx << "] - " + << prop.name << " - SM " << prop.major << "." << prop.minor << ", " + << prop.multiProcessorCount << " SMs @ " << (prop.clockRate / 1000.0) << " MHz, " + << "L2 cache: " << (prop.l2CacheSize >> 20) << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) << " GB" + << std::endl; + } + } + out << "\n"; + } + + out + << " --compute-capability= " + << " Override the compute capability.\n\n" + + << " --llc-capacity= " + << " Capacity of last-level cache in kilobytes. 
If this is non-zero," << end_of_line + << " profiling phases cycle through different input tensors to induce" << end_of_line + << " capacity misses in the L2.\n\n"; + +} + +void Options::Device::print_device_info(std::ostream &out) const { + int num_devices; + cudaDeviceProp props; + + cudaError_t result; + result = cudaGetDeviceCount(&num_devices); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetNumDevices() failed"); + } + + out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl; + + for (int device = 0; device < num_devices; device++) { + result = cudaSetDevice(device); + if (result != cudaSuccess) { + throw std::runtime_error("cudaSetDevice() failed for device"); + } + + result = cudaGetDeviceProperties(&props, device); + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProperties failed for device"); + } + + out << props.name << "," << props.major << props.minor << "," + << device << "," << props.multiGpuBoardGroupID << std::endl; + + } +} + +void Options::Device::print_options(std::ostream &out, int indent) const { + + out + << indent_str(indent) << "device: " << device << "\n" + << indent_str(indent) << "clock: " << int(double(properties.clockRate) / 1000.0) << "\n" + << indent_str(indent) << "compute-capability: " << compute_capability() << "\n"; +} + +/// Returns the compute capability of the listed device (e.g. 
61, 60, 70, 75) +int Options::Device::compute_capability() const { + return properties.major * 10 + properties.minor; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Initialization::Initialization(cutlass::CommandLine const &cmdline) { + + cmdline.get_cmd_line_argument("initialization-enabled", enabled, true); + + if (cmdline.check_cmd_line_flag("initialization-provider")) { + std::string str; + cmdline.get_cmd_line_argument("initialization-provider", str); + provider = library::from_string(str); + if (provider == library::Provider::kInvalid) { + enabled = false; + } + else if (provider != library::Provider::kReferenceHost && provider != library::Provider::kReferenceDevice) { + throw std::runtime_error("Unsupported initialization provider specified."); + } + } + else { + provider = library::Provider::kReferenceDevice; + } + + cmdline.get_cmd_line_argument("seed", seed, 2019); + + if (cmdline.check_cmd_line_flag("dist")) { + // user has set the data distribution (fix data distribution once set) + fix_data_distribution = true; + // set user provided data distribution + get_distribution(cmdline, "dist", data_distribution); + } + else { + // profiler chosen data distribution (allowed to change based on numeric types) + fix_data_distribution = false; + // set uniform data distribution with range [-4, 4] + data_distribution.set_uniform(-4, 4, 0); + } + + +} + +/// Gets the initial distribution +void Options::Initialization::get_distribution( + cutlass::CommandLine const &args, + std::string const &arg, + cutlass::Distribution &dist) { + + struct { + const char *label; + cutlass::Distribution::Kind kind; + } distribution_kinds[] = { + {"uniform", cutlass::Distribution::Uniform}, + {"gaussian", cutlass::Distribution::Gaussian}, + {"identity", cutlass::Distribution::Identity}, + {"sequential", cutlass::Distribution::Sequential}, + {0, cutlass::Distribution::Invalid} + }; + + struct { + char const *label; + 
double *member; + } members[] = { + {"min", &dist.uniform.min}, + {"max", &dist.uniform.max}, + {"mean", &dist.gaussian.mean}, + {"stddev", &dist.gaussian.stddev}, + {"pnzA", &dist.gaussian.pnzA}, + {"pnzB", &dist.gaussian.pnzB}, + {"pnzC", &dist.gaussian.pnzC}, + {"start", &dist.sequential.start}, + {"delta", &dist.sequential.delta}, + {0, 0} + }; + + // Initalize pnz values to a default value of 100% + dist.gaussian.pnz = 100.0; + dist.gaussian.pnzA = 100.0; + dist.gaussian.pnzB = 100.0; + dist.gaussian.pnzC = 100.0; + + using KeyValueVector = std::vector >; + + KeyValueVector values; + args.get_cmd_line_argument_pairs(arg.c_str(), values); + + // The parser expects the first token to be a string identifying the distribution type. + auto it = values.begin(); + if (it != values.end()) { + for (int i = 0; distribution_kinds[i].label; ++i) { + if (it->first == distribution_kinds[i].label) { + dist.kind = distribution_kinds[i].kind; + break; + } + } + ++it; + } + + // Subsequent key-value pairs update the named field of the distribution struct. + for (; it != values.end(); ++it) { + // Integer scaling factor - if < 0, no integer rounding is performed. + if ((it->first.compare("scale") == 0) && !it->second.empty()) { + std::stringstream ss; + ss << it->second; + ss >> dist.int_scale; + continue; // next token + } + + // Casts as integer without scaling + if (it->first.compare("integer") == 0) { + dist.int_scale = 0; + continue; // next token + } + + // initialize other members + for (int m = 0; members[m].label; ++m) { + if (it->first == members[m].label && !it->second.empty()) { + std::stringstream ss; + ss << it->second; + ss >> *(members[m].member); + } + } + } +} + +void Options::Initialization::print_usage(std::ostream &out) const { + + out << "Initialization:\n" + + << " --initialization= " + << " Enables initialization (default: true). 
If false, device memory is" << end_of_line + << " not initialized after allocation.\n\n" + + << " --initialization-provider= " + << " Selects initialization provider {host, device*}. (default: '*')\n\n" + + << " --dist= " + << " Data distribution of input tensors {uniform*, gaussian, identity, sequential}" << end_of_line + << " --dist=uniform,min:,max:,scale:" << end_of_line + << " --dist=gaussian,mean:,stddev:,scale:,pnzA:,pnzB:,pnzC:" << end_of_line + << " --dist=sequential,start:,delta:,scale:" << end_of_line + << " --dist=identity\n\n" + + << " --seed= " + << " Random number generator seed. Used to enforce deterministic" << end_of_line + << " initialization.\n\n"; + +} + +void Options::Initialization::print_options(std::ostream &out, int indent) const { + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Library::Library(cutlass::CommandLine const &cmdline) { + + algorithm_mode = AlgorithmMode::kDefault; + + if (cmdline.check_cmd_line_flag("library-algo-mode")) { + std::string mode = "default"; + cmdline.get_cmd_line_argument("library-algo-mode", mode); + algorithm_mode = from_string(mode); + } + + if (cmdline.check_cmd_line_flag("library-algos")) { + + // If algorithms are specified, override as kBest. 
+ algorithm_mode = AlgorithmMode::kBest; + + std::vector tokens; + cmdline.get_cmd_line_arguments("library-algos", tokens); + + algorithms.reserve(tokens.size()); + + for (auto const & token : tokens) { + if (token.find(":")) { + // TODO: tokenized range + } + else { + int algo; + std::stringstream ss; + + ss << token; + ss >> algo; + + algorithms.push_back(algo); + } + } + } +} + +void Options::Library::print_usage(std::ostream &out) const { + + out << "Library:\n" + + << " --library-algo-mode= " + << " Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN.\n" + << " " + << " mode={default*,matching,best}\n\n" + + << " --library-algos= " + << " If --algorithm-mode=best, permits specifying a selection of algorithms.\n\n"; + +} + +void Options::Library::print_options(std::ostream &out, int indent) const { + + out + << indent_str(indent) << "library-algo-mode: " << to_string(algorithm_mode) << "\n" + << indent_str(indent) << "library-algos: "; + + int j = 0; + for (int x : algorithms) { + out << (j++ ? 
"," : "") << x; + } + + out << "\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Profiling::Profiling(cutlass::CommandLine const &cmdline) { + + cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0); + cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10); + cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100); + cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50); + cmdline.get_cmd_line_argument("profiling-enabled", enabled, true); + + if (cmdline.check_cmd_line_flag("providers")) { + + std::vector tokens; + cmdline.get_cmd_line_arguments("providers", tokens); + + providers.clear(); + + for (auto const &token : tokens) { + providers.push_back(library::from_string(token)); + } + } + else { + providers.push_back(library::Provider::kCUTLASS); + providers.push_back(library::Provider::kCUBLAS); + providers.push_back(library::Provider::kCUDNN); + } +} + +void Options::Profiling::print_usage(std::ostream &out) const { + + out << "Profiling:\n" + + << " --workspace-count= " + << " Number of discrete workspaces maintained to avoid cache-resident " << end_of_line + << " If zero (default), the amount is chosen for each workload based on " << end_of_line + << " capacity of the last-level cache.\n\n" + + << " --profiling-iterations= " + << " Number of iterations to profile each kernel. 
If zero, kernels" << end_of_line + << " are launched up to the profiling duration.\n\n" + + << " --warmup-iterations= " + << " Number of iterations to execute each kernel prior to profiling.\n\n" + + << " --sleep-duration= " + << " Number of ms to sleep between profiling periods (ms).\n\n" + + << " --profiling-enabled= " + << " If true, profiling is actually conducted.\n\n" + + ; +} + +void Options::Profiling::print_options(std::ostream &out, int indent) const { + + out + << indent_str(indent) << "profiling_iterations: " << iterations << "\n" + << indent_str(indent) << "sleep_duration: " << sleep_duration << "\n" + << indent_str(indent) << "profiling_enabled: " << enabled << "\n" + << indent_str(indent) << "providers: ["; + + int j = 0; + for (auto const & provider : providers) { + out << (j++ ? ", " : "") << library::to_string(provider); + } + out << "]\n"; +} + +/// Returns true if a provider is enabled +bool Options::Profiling::provider_enabled(library::Provider provider) const { + return std::find(providers.begin(), providers.end(), provider) != providers.end(); +} + +/// Returns the index of a provider if its enabled +size_t Options::Profiling::index(library::Provider provider) const { + size_t idx = 0; + for (auto const & x : providers) { + if (x == provider) { + return idx; + } + ++idx; + } + return idx; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Verification::Verification(cutlass::CommandLine const &cmdline) { + + cmdline.get_cmd_line_argument("verification-enabled", enabled, true); + if (enabled) { + cmdline.get_cmd_line_argument("verification-required", required, false); + } + + cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05); + + cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0); + + if (cmdline.check_cmd_line_flag("save-workspace")) { + std::string value; + cmdline.get_cmd_line_argument("save-workspace", value); + save_workspace = 
from_string(value); + } + else { + save_workspace = SaveWorkspace::kNever; + } + + if (cmdline.check_cmd_line_flag("verification-providers")) { + + std::vector tokens; + cmdline.get_cmd_line_arguments("verification-providers", tokens); + + providers.clear(); + + for (auto const &token : tokens) { + library::Provider provider = library::from_string(token); + if (provider != library::Provider::kInvalid) { + providers.push_back(provider); + } + } + } + else { + providers.push_back(library::Provider::kCUBLAS); + providers.push_back(library::Provider::kReferenceDevice); + providers.push_back(library::Provider::kCUDNN); + } +} + +void Options::Verification::print_usage(std::ostream &out) const { + + out << "Verification:\n" + + << " --verification-enabled= " + << " Whether to perform verification checks.\n\n" + + << " --epsilon= " + << " Error threshold. Setting to zero (default) requires" << end_of_line + << " bit-level equivalence.\n\n" + + << " --nonzero-floor= " + << " Results whose absolute value is less than this quantity" << end_of_line + << " are treated as zero for comparisons.\n\n" + + << " --save-workspace= " + << " Specifies when to save the GEMM inputs and results to the filesystem." << end_of_line + << " --save-workspace=never never save workspace (default)" << end_of_line + << " --save-workspace=incorrect save workspace for incorrect results" << end_of_line + << " --save-workspace=always always save workspace\n\n" + + << " --verification-providers= " + << " List of providers used to verify result. 
(default: '*')" << end_of_line + << " Gemm verification-providers {cublas*}" << end_of_line + << " Conv2d verification-providers {cudnn*, device*, host}" + << "\n\n"; +} + +void Options::Verification::print_options(std::ostream &out, int indent) const { + + out + << indent_str(indent) << "verification_enabled: " << enabled << "\n" + << indent_str(indent) << "epsilon: " << epsilon << "\n" + << indent_str(indent) << "save_workspace: " << to_string(save_workspace) << "\n" + << indent_str(indent) << "verification_providers: ["; + + int j = 0; + for (auto const & provider : providers) { + out << (j++ ? ", " : "") << library::to_string(provider); + } + out << "]\n"; +} + +/// Returns true if a provider is enabled +bool Options::Verification::provider_enabled(library::Provider provider) const { + return std::find(providers.begin(), providers.end(), provider) != providers.end(); +} + +/// Returns the index of a provider if its enabled +size_t Options::Verification::index(library::Provider provider) const { + size_t idx = 0; + for (auto const & x : providers) { + if (x == provider) { + return idx; + } + ++idx; + } + return idx; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Report::Report(cutlass::CommandLine const &cmdline) { + + cmdline.get_cmd_line_argument("append", append, false); + cmdline.get_cmd_line_argument("output", output_path); + cmdline.get_cmd_line_argument("junit-output", junit_output_path); + + if (cmdline.check_cmd_line_flag("tags")) { + cmdline.get_cmd_line_argument_pairs("tags", pivot_tags); + } + + cmdline.get_cmd_line_argument("report-not-run", report_not_run, false); + + cmdline.get_cmd_line_argument("verbose", verbose, true); + + cmdline.get_cmd_line_argument("sort-results", sort_results, false); + + cmdline.get_cmd_line_argument("print-kernel-before-running", print_kernel_before_running, false); +} + +void Options::Report::print_usage(std::ostream &out) const { + + out << 
"Report:\n" + + << " --append= " + << " If true, result is appended to possibly existing file. Otherwise, " << end_of_line + << " any existing file is overwritten.\n\n" + + << " --output= " + << " Path to output file for machine readable results. Operation kind and '.csv' is appended.\n\n" + + << " --junit-output= " + << " Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended.\n\n" + + << " --print-kernel-before-running= " + << " Prints the name of the kernel being profiled before running the kernel." << end_of_line + << " This is useful for determining which kernel is causing a run of the profiler to hang\n\n" + + << " --report-not-run= " + << " If true, reports the status of all kernels including those that" << end_of_line + << " do not satisfy the given arguments.\n\n" + + << " --tags= " + << " Inserts leading columns in output table and uniform values for each" << end_of_line + << " column. Useful for generating pivot tables.\n\n" + + << " --verbose= " + << " Prints human-readable text to stdout. 
If false, nothing is written to stdout.\n\n" + + << " --sort-results= " + << " Sorts results (by flops-per-byte).\n\n"; +} + +void Options::Report::print_options(std::ostream &out, int indent) const { + + out + << indent_str(indent) << "append: " << append << "\n" + << indent_str(indent) << "output: " << output_path << "\n" + << indent_str(indent) << "junit-output: " << junit_output_path << "\n" + << indent_str(indent) << "print-kernel-before-running: " << print_kernel_before_running << "\n" + << indent_str(indent) << "report-not-run: " << report_not_run << "\n" + << indent_str(indent) << "tags:\n"; + + for (auto const & tag : pivot_tags) { + out << indent_str(indent + 1) << tag.first << ": " << tag.second << "\n"; + } + + out + << indent_str(indent) << "verbose: " << verbose << "\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::About::About(cutlass::CommandLine const &cmdline) { + help = cmdline.check_cmd_line_flag("help"); + version = cmdline.check_cmd_line_flag("version"); + device_info = cmdline.check_cmd_line_flag("device-info"); +} + +void Options::About::print_usage(std::ostream &out) const { + + out << "About:\n" + << " --version "; + + print_version(out); + + out << "\n"; +} + +void Options::About::print_version(std::ostream &out) { + out << "CUTLASS " << cutlass::getVersionString() + << " built on " << __DATE__ << " at " << __TIME__; + if (!cutlass::getGitRevision().empty()) out << " with commit " << cutlass::getGitRevision() << ""; +} + +void Options::About::print_options(std::ostream &out, int indent) const { + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Options(cutlass::CommandLine const &cmdline): + cmdline(cmdline), + device(cmdline), + initialization(cmdline), + library(cmdline), + profiling(cmdline), + verification(cmdline), + report(cmdline), + about(cmdline) { + + if (cmdline.check_cmd_line_flag("mode")) { 
+ std::string token; + cmdline.get_cmd_line_argument("mode", token); + execution_mode = from_string(token); + } + else { + execution_mode = ExecutionMode::kProfile; + } + + // Enumerating kernels is equivalent to a dry run. + if (execution_mode == ExecutionMode::kEnumerate) { + execution_mode = ExecutionMode::kDryRun; + } + + if (cmdline.check_cmd_line_flag("operation")) { + std::string str; + cmdline.get_cmd_line_argument("operation", str); + operation_kind = library::from_string(str); + } + else if (cmdline.check_cmd_line_flag("function")) { + std::string str; + cmdline.get_cmd_line_argument("function", str); + operation_kind = library::from_string(str); + } + else { + operation_kind = library::OperationKind::kInvalid; + } + + if (cmdline.check_cmd_line_flag("operation_names")) { + cmdline.get_cmd_line_arguments("operation_names", operation_names); + } + else if (cmdline.check_cmd_line_flag("kernels")) { + cmdline.get_cmd_line_arguments("kernels", operation_names); + profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match"); + } + + if (cmdline.check_cmd_line_flag("ignore-kernels")) { + cmdline.get_cmd_line_arguments("ignore-kernels", excluded_operation_names); + profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match"); + } + + // Prevent launches on the device for anything other than CUTLASS operation + // Allow verification only on host + if (execution_mode == ExecutionMode::kTrace) { + initialization.provider = library::Provider::kReferenceHost; + verification.providers = {library::Provider::kReferenceHost}; + profiling.enabled = false; + } +} + +void Options::print_usage(std::ostream &out) const { + + out + << "CUTLASS Profiler\n" + << "usage:\n\n" + << " cutlass_profiler [options]\n\n" + << " --help\n\n" + + << " --mode= " + << " Cutlass profiler execution mode." 
<< end_of_line + << " --mode=profile regular verification and profiling (default)" << end_of_line + << " --mode=dry_run no kernels are launched or workspaces allocated" << end_of_line + << " --mode=enumerate lists all operation kind and operations" << end_of_line + << " --mode=trace executes a single device-side computation with" << end_of_line + << " no other kernel launches\n\n" + + << " --device-info " + << " Prints information on all GPUs present in the system\n\n" + + << " --operation= " + << " CUTLASS operation to profile.\n\n" + + << " --kernels= " + << " Filter operations by kernel names. For example, call all kernels with" << end_of_line + << " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and \"align8\") in their" << end_of_line + << " operation name using --kernels=\"s1688*nt, s884*tn*align8\"\n\n" + + << " --ignore-kernels= " + << " Excludes kernels whose names match anything in this list.\n\n" + ; + + // + // Detailed options + // + + device.print_usage(out); + out << "\n"; + + initialization.print_usage(out); + out << "\n"; + + library.print_usage(out); + out << "\n"; + + profiling.print_usage(out); + out << "\n"; + + verification.print_usage(out); + out << "\n"; + + report.print_usage(out); + out << "\n"; + + about.print_usage(out); + out << "\n"; +} + +void Options::print_options(std::ostream &out) const { + + out + << "options:\n" + << " help: " << about.help << "\n" + << " mode: " << to_string(execution_mode) << "\n"; + + out + << " device:\n"; + device.print_options(out, 2); + + out + << " initialization:\n"; + initialization.print_options(out, 2); + + out + << " profiling:\n"; + profiling.print_options(out, 2); + + out + << " verification:\n"; + verification.print_options(out, 2); + + out + << " report:\n"; + report.print_options(out, 2); +} + +std::string Options::indent_str(int indent) { + return std::string(indent * 2, ' '); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // 
namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/performance_report.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/performance_report.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c3b81d8767ce15ca9b7202c889dc6dfd98ee4abc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/performance_report.cpp @@ -0,0 +1,505 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment +*/ + +#include +#include +#include +#include +#include + +#include "cutlass/library/util.h" + +#include "cutlass/library/util.h" + +#include "cutlass/profiler/performance_report.h" +#include "cutlass/profiler/debug.h" +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if defined(__unix__) + +#define SHELL_COLOR_BRIGHT() "\033[1;37m" +#define SHELL_COLOR_GREEN() "\033[1;32m" +#define SHELL_COLOR_RED() "\033[1;31m" +#define SHELL_COLOR_END() "\033[0m" + +#else + +#define SHELL_COLOR_BRIGHT() "" +#define SHELL_COLOR_GREEN() "" +#define SHELL_COLOR_RED() "" +#define SHELL_COLOR_END() "" + +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// + +PerformanceReport::PerformanceReport( + Options const &options, + std::vector const &argument_names, + library::OperationKind const &op_kind +): + options_(options), argument_names_(argument_names), problem_index_(0), good_(true), op_kind_(op_kind) { + + // Strip '.csv' if present + std::string base_path = options_.report.output_path; + base_path = base_path.substr(0, base_path.rfind(".csv")); + op_file_name_ = base_path + "." 
+ to_string(op_kind_) + ".csv"; + + base_path = options_.report.junit_output_path; + base_path = base_path.substr(0, base_path.rfind(".xml")); + base_path = base_path.substr(0, base_path.rfind(".junit")); + op_junit_file_name_ = base_path + "." + to_string(op_kind_) + ".junit.xml"; + + // + // Open output file for operation of PerformanceReport::op_kind + // + if (!options_.report.output_path.empty()) { + + bool print_header = true; + + if (options_.report.append) { + + std::ifstream test_output_file(op_file_name_); + + if (test_output_file.is_open()) { + print_header = false; + test_output_file.close(); + } + + output_file_.open(op_file_name_, std::ios::app); + } + else { + output_file_.open(op_file_name_); + } + + if (!output_file_.good()) { + + std::cerr << "Could not open output file at path '" + << options_.report.output_path << "'" << std::endl; + + good_ = false; + } + + if (print_header) { + print_csv_header_(output_file_) << std::endl; + } + } + + if (!options_.report.junit_output_path.empty()) { + + junit_output_file_.open(op_junit_file_name_); + + if (!junit_output_file_.good()) { + + std::cerr << "Could not open junit output file at path '" + << options_.report.junit_output_path << "'" << std::endl; + + good_ = false; + } + + print_junit_header_(junit_output_file_); + } +} + +void PerformanceReport::next_problem() { + ++problem_index_; +} + +void PerformanceReport::append_result(PerformanceResult result) { + + result.problem_index = problem_index_; + + if (options_.report.verbose) { + std::cout << "\n"; + print_result_pretty_(std::cout, result) << std::flush; + } + + if (junit_output_file_.is_open()) { + print_junit_result_(junit_output_file_, result); + } + + if (output_file_.is_open()) { + print_result_csv_(output_file_, result) << std::endl; + } + else { + concatenated_results_.push_back(result); + } +} + +void PerformanceReport::sort_results(PerformanceResultVector &results) { + + struct FlopsPerByteCompare + { + bool operator()(const 
PerformanceResult &a, const PerformanceResult &b) + { + double a_flops_per_byte = double(a.flops) / double(a.bytes); + double b_flops_per_byte = double(b.flops) / double(b.bytes); + + return (a_flops_per_byte < b_flops_per_byte); + } + }; + + std::stable_sort(results.begin(), results.end(), FlopsPerByteCompare()); +} + +void PerformanceReport::append_results(PerformanceResultVector const &results) { + + if (options_.report.verbose) { + std::cout << "\n\n"; + } + + // For each result + for (auto const & result : results) { + append_result(result); + } +} + +PerformanceReport::~PerformanceReport() { + + // + // Output results to stdout if they were not written to a file already. + // + if (options_.report.verbose && !concatenated_results_.empty()) { + + if (options_.report.sort_results) { + sort_results(concatenated_results_); + } + + std::cout << "\n\n"; + std::cout << "=============================\n\n"; + std::cout << "CSV Results:\n\n"; + + print_csv_header_(std::cout) << std::endl; + + for (auto const &result : concatenated_results_) { + print_result_csv_(std::cout, result) << "\n"; + } + } + else if (output_file_.is_open() && options_.report.verbose) { + std::cout << "\nWrote results to '" << op_file_name_ << "'" << std::endl; + } + + if (output_file_.is_open()) { + output_file_.close(); + } + + if (junit_output_file_.is_open()) { + print_junit_footer_(junit_output_file_); + junit_output_file_.close(); + std::cout << "\nWrote jUnit results to '" << op_junit_file_name_ << "'" << std::endl; + } +} + +static const char *disposition_status_color(Disposition disposition) { + switch (disposition) { + case Disposition::kPassed: return SHELL_COLOR_GREEN(); + case Disposition::kIncorrect: return SHELL_COLOR_RED(); + case Disposition::kFailed: return SHELL_COLOR_RED(); + default: + break; + } + return SHELL_COLOR_END(); +} + +/// Prints the result in human readable form +std::ostream & PerformanceReport::print_result_pretty_( + std::ostream &out, + PerformanceResult 
const &result, + bool use_shell_coloring) { + + out << "=============================\n" + << " Problem ID: " << result.problem_index << "\n"; + + if (!options_.report.pivot_tags.empty()) { + + out << " Tags: "; + + int column_idx = 0; + for (auto const & tag : options_.report.pivot_tags) { + out << (column_idx++ ? "," : "") << tag.first << ":" << tag.second; + } + + out << "\n"; + } + + std::string shell_color_bright = use_shell_coloring ? SHELL_COLOR_BRIGHT() : ""; + std::string shell_color_end = use_shell_coloring ? SHELL_COLOR_END() : ""; + auto _disposition_status_color = [&](Disposition d) -> const char * { + return use_shell_coloring ? disposition_status_color(d) : ""; + }; + + out + << "\n" + << " Provider: " << shell_color_bright << library::to_string(result.provider, true) << shell_color_end << "\n" + << " OperationKind: " << shell_color_bright << library::to_string(result.op_kind) << shell_color_end << "\n" + << " Operation: " << result.operation_name << "\n\n" + << " Status: " << shell_color_bright << library::to_string(result.status, true) << shell_color_end << "\n" + << " Verification: " << shell_color_bright << (options_.verification.enabled ? 
"ON":"OFF") << shell_color_end << "\n" + << " Disposition: " << _disposition_status_color(result.disposition) << to_string(result.disposition, true) << shell_color_end << "\n\n"; + + // Display individual verification results for each verification-provider + if (options_.verification.enabled) { + + static int const indent_spaces = 16; + + for(auto & m : result.verification_map) { + out << std::right << std::setw(indent_spaces) << library::to_string(m.first, true) << ": " << to_string(m.second, true) << "\n"; + } + } + + out + << "\n Arguments:"; + + int column_idx = 0; + for (auto const &arg : result.arguments) { + if (!arg.second.empty()) { + out << " --" << arg.first << "=" << arg.second; + column_idx += int(4 + arg.first.size() + arg.second.size()); + if (column_idx > 98) { + out << " \\\n "; + column_idx = 0; + } + } + } + out << "\n\n"; + + out + << " Bytes: " << result.bytes << " bytes\n" + << " FLOPs: " << result.flops << " flops\n" + << " FLOPs/Byte: " << (result.flops / result.bytes) << "\n\n"; + + if (result.good()) { + + out + << " Runtime: " << result.runtime << " ms\n" + << " Memory: " << result.gbytes_per_sec() << " GiB/s\n" + << "\n Math: " << result.gflops_per_sec() << " GFLOP/s\n"; + + } + + return out; +} + +/// Prints the CSV header +std::ostream & PerformanceReport::print_csv_header_( + std::ostream &out) { + + int column_idx = 0; + + // Pivot tags + for (auto const & tag : options_.report.pivot_tags) { + out << (column_idx++ ? "," : "") << tag.first; + } + + out + << (column_idx ? 
"," : "") << "Problem,Provider" + << ",OperationKind,Operation,Disposition,Status"; + + for (auto const &arg_name : argument_names_) { + out << "," << arg_name; + } + + out + << ",Bytes" + << ",Flops" + << ",Flops/Byte" + << ",Runtime" + << ",GB/s" + << ",GFLOPs" + ; + + return out; +} + +/// Print the result in CSV output +std::ostream & PerformanceReport::print_result_csv_( + std::ostream &out, + PerformanceResult const &result) { + + int column_idx = 0; + + // Pivot tags + for (auto const & tag : options_.report.pivot_tags) { + out << (column_idx++ ? "," : "") << tag.second; + } + + out + << (column_idx ? "," : "") + << result.problem_index + << "," << to_string(result.provider, true) + << "," << to_string(result.op_kind) + << "," << result.operation_name + << "," << to_string(result.disposition) + << "," << library::to_string(result.status); + + for (auto const & arg : result.arguments) { + out << "," << arg.second; + } + + out + << "," << result.bytes + << "," << result.flops + << "," << result.flops / result.bytes + << "," << result.runtime; + + if (result.good()) { + + out + << "," << result.gbytes_per_sec() + << "," << result.gflops_per_sec() + ; + + } + else { + out << std::string(2 + , ',' + ); + } + + return out; +} + +std::ostream & PerformanceReport::print_junit_header_(std::ostream &out) { + + out << "" << std::endl; + out << "" << std::endl; + return out; + +} + +namespace { + + std::string escape_xml_special_chars(const std::string& src) { + std::stringstream dst; + for (char ch : src) { + switch (ch) { + case '&': dst << "&"; break; + case '\'': dst << "'"; break; + case '"': dst << """; break; + case '<': dst << "<"; break; + case '>': dst << ">"; break; + default: dst << ch; break; + } + } + return dst.str(); + } + + template + std::ostream & print_junit_result_property_(std::ostream & os, const std::string & name, const T & property) { + return os << " " << std::endl; + } +} + +std::ostream & PerformanceReport::print_junit_result_(std::ostream 
&out, PerformanceResult const &result) { + + out << " " << "" << std::endl; + + if (failed) { + out << " " << std::endl; + } + + if (error) { + out << " " << std::endl; + } + + out << " " << std::endl; + + out << " " << std::endl; + + return out; + +} + +std::ostream & PerformanceReport::print_junit_footer_(std::ostream &out) { + + out << "" << std::endl; + return out; + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/performance_result.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/performance_result.cu new file mode 100644 index 0000000000000000000000000000000000000000..438c0f258a8d4c815843df8e94f3281d8582e7f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/performance_result.cu @@ -0,0 +1,61 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief +*/ + +#pragma once + +#include + +#include "cutlass/cutlass.h" + +// CUTLASS Profiler includes +#include "cutlass/profiler/enumerated_types.h" +#include "cutlass/profiler/performance_result.h" + +// CUTLASS Library includes +#include "cutlass/library/library.h" +#include "cutlass/library/util.h" + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/problem_space.cpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/problem_space.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f89396b164c0fb571f2ee26cd2914a1f48844177 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/problem_space.cpp @@ -0,0 +1,1267 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief +*/ + +#include +#include +#include + +#include "cutlass/library/util.h" + +#include "cutlass/profiler/problem_space.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +static T lexical_cast(std::string const &str) { + std::stringstream ss; + T value; + + ss << str; + ss >> value; + + return value; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +std::ostream & KernelArgument::ValueIterator::print(std::ostream &out) const { + out << "[" << (void *)this << " " << argument->qualified_name() << "] "; + if (this->null_argument) { + out << ""; + } + else { + out << ""; + } + return out; +} + +KernelArgument::~KernelArgument() { + +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +ScalarArgument::ScalarValue::ScalarValue( + std::string const &value_, + ScalarArgument const *argument_, + bool not_null_ +): + KernelArgument::Value(argument_, not_null_), + value(value_) { + +} + +std::ostream &ScalarArgument::ScalarValue::print(std::ostream &out) const { + out << argument->qualified_name() << ": "; + if (not_null) { + out << value; + } + else { + out << ""; + } + return out; +} + +ScalarArgument::ScalarValueIterator::ScalarValueIterator( + ScalarArgument const *argument_ +): + KernelArgument::ValueIterator(argument_) { + + if (argument_) { + value_it = argument_->values.begin(); + } +} + +void ScalarArgument::ScalarValueIterator::operator++() { + if (this->null_argument) { + this->null_argument = false; + } + else { + ++value_it; + } +} + +bool ScalarArgument::ScalarValueIterator::operator==(ValueIterator const &it) const { + if 
(it.type() != ArgumentTypeID::kScalar) { + throw std::runtime_error("Cannot compare ScalarValueIterator with iterator of different type"); + } + auto const & scalar_it = static_cast(it); + return value_it == scalar_it.value_it; +} + +/// Gets the value pointed to +std::unique_ptr ScalarArgument::ScalarValueIterator::at() const { + if (this->null_argument) { + return std::unique_ptr( + new ScalarArgument::ScalarValue( + std::string(), + static_cast(argument), + false)); + } + else { + return std::unique_ptr( + new ScalarArgument::ScalarValue( + *value_it, + static_cast(argument))); + } +} + +std::unique_ptr ScalarArgument::begin() const { + return std::unique_ptr(new ScalarValueIterator(this)); +} + +std::unique_ptr ScalarArgument::end() const { + ScalarValueIterator *it = new ScalarValueIterator(this); + it->value_it = this->values.end(); + it->null_argument = false; + return std::unique_ptr(it); +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +IntegerArgument::IntegerValue::IntegerValue( + int64_t value_, + IntegerArgument const *argument_, + bool not_null_ +): KernelArgument::Value(argument_, not_null_), value(value_) { + +} + + +/// Pretty printer for debugging +std::ostream &IntegerArgument::IntegerValue::print(std::ostream &out) const { + out << argument->qualified_name() << ": "; + if (not_null) { + out << value; + } + else { + out << ""; + } + return out; +} + +IntegerArgument::IntegerValueIterator::IntegerValueIterator(IntegerArgument const *argument_): + KernelArgument::ValueIterator(argument_) { + + if (argument_) { + range_it = argument_->ranges.begin(); + if (range_it != argument_->ranges.end()) { + value_it = range_it->begin(); + } + } +} + +void IntegerArgument::IntegerValueIterator::operator++() { + + if (this->null_argument) { + this->null_argument = false; + } + else { + ++value_it; + if (value_it == range_it->end()) { + ++range_it; + if (range_it != static_cast(argument)->ranges.end()) { + 
value_it = range_it->begin(); + } + } + } +} + +bool IntegerArgument::IntegerValueIterator::operator==(ValueIterator const &it) const { + if (it.type() != ArgumentTypeID::kInteger) { + throw std::runtime_error("Cannot compare IntegerValueIterator with iterator of different type"); + } + + auto const & integer_iterator = static_cast(it); + + if (this->null_argument) { + return it.null_argument; + } + else { + if (range_it != integer_iterator.range_it) { + return false; + } + if (range_it == static_cast(argument)->ranges.end() && + range_it == integer_iterator.range_it) { + return true; + } + return value_it == integer_iterator.value_it; + } +} + +std::unique_ptr IntegerArgument::IntegerValueIterator::at() const { + if (this->null_argument) { + return std::unique_ptr( + new IntegerArgument::IntegerValue( + 0, static_cast(argument), false)); + } + else { + return std::unique_ptr( + new IntegerArgument::IntegerValue( + *value_it, static_cast(argument))); + } +} + +std::unique_ptr IntegerArgument::begin() const { + return std::unique_ptr(new IntegerValueIterator(this)); +} + +std::unique_ptr IntegerArgument::end() const { + IntegerValueIterator *it = new IntegerValueIterator(this); + it->range_it = this->ranges.end(); + it->null_argument = false; + return std::unique_ptr(it); +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +TensorArgument::TensorValue::TensorValue( + TensorDescription const &desc_, + TensorArgument const *argument_, + bool not_null_ +): + KernelArgument::Value(argument_, not_null_), + desc(desc_) { + +} + +/// Pretty printer for debugging +std::ostream &TensorArgument::TensorValue::print(std::ostream &out) const { + out << argument->qualified_name() << ": " << to_string(desc.element) << ": " << to_string(desc.layout); + return out; +} + +TensorArgument::TensorValueIterator::TensorValueIterator( + TensorArgument const *argument_ +): + KernelArgument::ValueIterator(argument_) { + + if (argument_) 
{ + value_it = argument_->values.begin(); + } +} + +void TensorArgument::TensorValueIterator::operator++() { + if (this->null_argument) { + this->null_argument = false; + } + else { + ++value_it; + } +} + +bool TensorArgument::TensorValueIterator::operator==(ValueIterator const &it) const { + if (it.type() != ArgumentTypeID::kTensor) { + throw std::runtime_error("Cannot compare TensorValueIterator with iterator of different type"); + } + auto const & tensor_it = static_cast(it); + return value_it == tensor_it.value_it; +} + +/// Gets the value pointed to +std::unique_ptr TensorArgument::TensorValueIterator::at() const { + + if (this->null_argument) { + return std::unique_ptr( + new TensorArgument::TensorValue( + TensorDescription(), static_cast(argument), false)); + } + else { + return std::unique_ptr( + new TensorArgument::TensorValue( + *value_it, static_cast(argument))); + } +} + +std::unique_ptr TensorArgument::begin() const { + return std::unique_ptr(new TensorValueIterator(this)); +} + +std::unique_ptr TensorArgument::end() const { + TensorValueIterator *it = new TensorValueIterator(this); + it->value_it = this->values.end(); + it->null_argument = false; + return std::unique_ptr(it); +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +EnumeratedTypeArgument::EnumeratedTypeValue::EnumeratedTypeValue( + std::string const & element_, + EnumeratedTypeArgument const *argument_, + bool not_null_ +): + KernelArgument::Value(argument_, not_null_), + element(element_) { + +} + +/// Pretty printer for debugging +std::ostream &EnumeratedTypeArgument::EnumeratedTypeValue::print(std::ostream &out) const { + out << argument->qualified_name() << ": " << element; + return out; +} + +EnumeratedTypeArgument::EnumeratedTypeValueIterator::EnumeratedTypeValueIterator( + EnumeratedTypeArgument const *argument_ +): + KernelArgument::ValueIterator(argument_) { + + if (argument_) { + value_it = argument_->values.begin(); + } +} 
+ +void EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator++() { + if (this->null_argument) { + this->null_argument = false; + } + else { + ++value_it; + } +} + +bool EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator==(ValueIterator const &it) const { + + if (it.type() != ArgumentTypeID::kEnumerated) { + throw std::runtime_error("Cannot compare EnumeratedTypeValueIterator with iterator of different type"); + } + + auto const & enumerated_type_it = static_cast(it); + return value_it == enumerated_type_it.value_it; +} + +/// Gets the value pointed to +std::unique_ptr EnumeratedTypeArgument::EnumeratedTypeValueIterator::at() const { + + if (this->null_argument) { + return std::unique_ptr( + new EnumeratedTypeValue( + std::string(), static_cast(argument), false)); + } + else { + return std::unique_ptr( + new EnumeratedTypeValue( + *value_it, static_cast(argument))); + } +} + +std::unique_ptr EnumeratedTypeArgument::begin() const { + return std::unique_ptr(new EnumeratedTypeValueIterator(this)); +} + +std::unique_ptr EnumeratedTypeArgument::end() const { + EnumeratedTypeValueIterator *it = new EnumeratedTypeValueIterator(this); + it->value_it = this->values.end(); + it->null_argument = false; + return std::unique_ptr(it); +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +ProblemSpace::Iterator::Iterator() { + +} + +ProblemSpace::Iterator::Iterator(ProblemSpace const &problem_space) { + for (auto const & arg_ptr : problem_space.arguments) { + construct_(arg_ptr.get()); + } +} + +ProblemSpace::Iterator::Iterator(Iterator && it) { + iterators = std::move(it.iterators); +} + +/// Helper for recursively constructing iterators +void ProblemSpace::Iterator::construct_(KernelArgument const *argument) { + iterators.emplace_back(argument->begin()); +} + +/// Given a set of ranges, iterate over the points within their Cartesian product. No big deal. 
+void ProblemSpace::Iterator::operator++() { + + // Define a pair of iterator into the vector of iterators. + IteratorVector::iterator iterator_it = iterators.begin(); + IteratorVector::iterator next_iterator = iterator_it; + + // Advance the first argument. + ++(**iterator_it); + + // Maintain a pair of iterators over consecutive arguments. + ++next_iterator; + + // Carry logic + while (next_iterator != iterators.end() && + **iterator_it == *((*iterator_it)->argument->end())) { // Did an iterator reach the end of its range? + + (*iterator_it) = (*iterator_it)->argument->begin(); // Reset that iterator, + + ++(**next_iterator); // and increment the next argument's iterator. + + iterator_it = next_iterator; // Advance to the next argument + ++next_iterator; + } +} + +/// Moves iterator to end +void ProblemSpace::Iterator::move_to_end() { + if (!iterators.empty()) { + std::unique_ptr new_iter = iterators.back()->argument->end(); + std::swap(iterators.back(), new_iter); + } +} + +ProblemSpace::Problem ProblemSpace::Iterator::at() const { + Problem problem; + + for (std::unique_ptr const & it : iterators) { + problem.emplace_back(it->at()); + } + + return problem; +} + +/// Equality operator +bool ProblemSpace::Iterator::operator==(Iterator const &it) const { + + // This would be an opportunity for auto, but explicitly denoting references to + // owning smart pointers to dynamic polymorphic objects seems like a kindness to the reader. 
+ IteratorVector::const_iterator first_it = iterators.begin(); + IteratorVector::const_iterator second_it = it.iterators.begin(); + + int idx = 0; + for (; first_it != iterators.end(); ++first_it, ++second_it, ++idx) { + + KernelArgument::ValueIterator const *my_it = first_it->get(); + KernelArgument::ValueIterator const *their_it = second_it->get(); + + if (*my_it != *their_it) { + return false; + } + } + + return true; +} + +std::ostream &ProblemSpace::Iterator::print(std::ostream &out) const { + + for (std::unique_ptr const & iter_ptr : iterators) { + out << " [iter " << (iter_ptr->null_argument ? "null" : "") + << ", type: " << to_string(iter_ptr->argument->description->type) << "]" << std::endl; + } + + return out; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +ProblemSpace::ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline) { + + // Clone the arguments + for (ArgumentDescription const & arg_desc : schema) { + clone_(arguments, &arg_desc); + } + + // Parse values from the command line + for (auto & arg : arguments) { + parse_(arg.get(), cmdline); + } +} + + +/// Returns the index of an argument by name +size_t ProblemSpace::argument_index(char const *name) const { + return argument_index_map.at(name); +} + +/// Helper for recursively cloning +void ProblemSpace::clone_( + KernelArgumentVector &kernel_args, + ArgumentDescription const *arg_desc) { + + KernelArgument *kernel_arg = nullptr; + + switch (arg_desc->type) { + case ArgumentTypeID::kScalar: + kernel_arg = new ScalarArgument(arg_desc); + break; + case ArgumentTypeID::kInteger: + kernel_arg = new IntegerArgument(arg_desc); + break; + case ArgumentTypeID::kTensor: + kernel_arg = new TensorArgument(arg_desc); + break; + case ArgumentTypeID::kStructure: + { + throw std::runtime_error("ArgumentTypeID::kStructure not supported"); + } + break; + case ArgumentTypeID::kEnumerated: + kernel_arg = new 
EnumeratedTypeArgument(arg_desc); + break; + + default: break; + } + + if (kernel_arg) { + size_t idx = kernel_args.size(); + for (auto const &alias : arg_desc->aliases) { + argument_index_map.insert(std::make_pair(alias, idx)); + } + kernel_args.emplace_back(kernel_arg); + } +} + +/// Parses a command line +void ProblemSpace::parse_(KernelArgument *arg, CommandLine const &cmdline) { + + switch (arg->description->type) { + case ArgumentTypeID::kScalar: + { + auto * scalar = static_cast(arg); + + for (auto const &alias : arg->description->aliases) { + if (cmdline.check_cmd_line_flag(alias.c_str())) { + + std::vector> tokens; + cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); + + for (auto const & vec : tokens) { + if (!vec.empty()) { + scalar->values.push_back(vec.front()); + } + } + break; + } + } + } + break; + case ArgumentTypeID::kInteger: + { + auto *integer = static_cast(arg); + + for (auto const &alias : arg->description->aliases) { + if (cmdline.check_cmd_line_flag(alias.c_str())) { + + std::vector > tokens; + cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); + + for (auto &range_tokens : tokens) { + + if (!range_tokens.empty()) { + + Range range; + + if (range_tokens.front() == "rand") { + range.mode = Range::Mode::kRandom; + } + else if (range_tokens.front() == "randlg2") { + range.mode = Range::Mode::kRandomLog2; + } + + switch (range.mode) { + case Range::Mode::kSequence: + { + range.first = lexical_cast(range_tokens.front()); + + if (range_tokens.size() > 1) { + range.last = lexical_cast(range_tokens.at(1)); + } + else { + range.last = range.first; + } + + if (range_tokens.size() > 2) { + range.increment = lexical_cast(range_tokens.at(2)); + } + else { + range.increment = 1; + } + } + break; + case Range::Mode::kRandom: // fall-through + case Range::Mode::kRandomLog2: + { + if (range_tokens.size() < 4) { + throw std::runtime_error( + "Range of mode 'rand' must have four tokens showing " + "the minimum, maximum, and number of 
iterations. For example, " + "rand:16:128:1000"); + } + + range.minimum = lexical_cast(range_tokens.at(1)); + range.maximum = lexical_cast(range_tokens.at(2)); + range.first = 1; + range.last = lexical_cast(range_tokens.at(3)); + range.increment = 1; + + if (range_tokens.size() > 4) { + range.divisible = lexical_cast(range_tokens.at(4)); + } + } + break; + default: + throw std::runtime_error("Unsupported range mode."); + break; + } + + integer->ranges.push_back(range); + } + } + break; + } + } + } + break; + case ArgumentTypeID::kTensor: + { + auto *tensor = static_cast(arg); + + for (auto const &alias : arg->description->aliases) { + if (cmdline.check_cmd_line_flag(alias.c_str())) { + + std::vector> tokens; + + cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); + + for (auto const & tensor_tokens : tokens) { + if (!tensor_tokens.empty()) { + TensorArgument::TensorDescription tensor_desc; + + tensor_desc.element = cutlass::library::from_string(tensor_tokens.front()); + + // Layout + if (tensor_tokens.size() > 1) { + tensor_desc.layout = cutlass::library::from_string(tensor_tokens.at(1)); + } + + // Stride + for (size_t i = 2; i < tensor_tokens.size(); ++i) { + tensor_desc.stride.push_back(lexical_cast(tensor_tokens.at(i))); + } + + tensor->values.push_back(tensor_desc); + } + } + break; + } + } + } + break; + case ArgumentTypeID::kStructure: + { + throw std::runtime_error("Structure arguments not supported"); + } + break; + case ArgumentTypeID::kEnumerated: + { + auto *enumerated_type = static_cast(arg); + + for (auto const &alias : arg->description->aliases) { + if (cmdline.check_cmd_line_flag(alias.c_str())) { + + std::vector tokens; + cmdline.get_cmd_line_arguments(alias.c_str(), tokens); + + for (auto const & token : tokens) { + enumerated_type->values.push_back(token); + } + + break; + } + } + } + break; + default: + break; + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + 
+ProblemSpace::Iterator ProblemSpace::begin() const { + return ProblemSpace::Iterator(*this); +} + +ProblemSpace::Iterator ProblemSpace::end() const { + ProblemSpace::Iterator it(*this); + it.move_to_end(); + return it; +} + +/// Gets all argument names as an ordered vector +std::vector ProblemSpace::argument_names() const { + + Problem problem = this->begin().at(); + + std::vector names; + names.reserve(problem.size()); + + for (auto const & arg : problem) { + names.push_back(arg->argument->description->aliases.front()); + } + + return names; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr) { + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) { + int_value = static_cast(value_ptr)->value; + } + else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) { + std::stringstream ss; + ss << static_cast(value_ptr)->value; + ss >> int_value; + } + else { + throw std::runtime_error( + "arg_as_int64_t() - illegal cast. Problem space argument must be integer or scalar"); + } + + return true; + } + + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr) { + int64_t value64; + bool obtained = arg_as_int(value64, value_ptr); + if (obtained) { + int_value = int(value64); + return true; + } + return false; +} + +/// Lexically casts an argument to an int +bool arg_as_int( + int &int_value, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_int(int_value, value_ptr); +} + +/// Lexically casts an argument to an int64 +bool arg_as_int( + int64_t &int_value, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_int(int_value, value_ptr); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_NumericTypeID( + library::NumericTypeID &numeric_type, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + numeric_type = library::from_string( + static_cast(value_ptr)->element); + + if (numeric_type == library::NumericTypeID::kInvalid) { + throw std::runtime_error( + "arg_as_NumericTypeID() - illegal cast."); + } + } + else { + + throw std::runtime_error( + "arg_as_NumericTypeID() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_NumericTypeID( + library::NumericTypeID &numeric_type, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_NumericTypeID(numeric_type, value_ptr); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_RasterOrder( + library::RasterOrder &raster_order, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + raster_order = library::from_string( + static_cast(value_ptr)->element); + + if (raster_order == library::RasterOrder::kInvalid) { + throw std::runtime_error( + "arg_as_RasterOrder() - illegal cast."); + } + } + else { + throw std::runtime_error( + "arg_as_RasterOrder() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_RasterOrder( + library::RasterOrder &raster_order, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_RasterOrder(raster_order, value_ptr); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_LayoutTypeID( + library::LayoutTypeID &layout_type, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + layout_type = library::from_string( + static_cast(value_ptr)->element); + + if (layout_type == library::LayoutTypeID::kInvalid) { + throw std::runtime_error( + "arg_as_LayoutTypeID() - illegal cast."); + } + } + else { + + throw std::runtime_error( + "arg_as_LayoutTypeID() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_LayoutTypeID( + library::LayoutTypeID &layout_type, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_LayoutTypeID(layout_type, value_ptr); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_OpcodeClassID( + library::OpcodeClassID &opcode_class, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + opcode_class = library::from_string( + static_cast(value_ptr)->element); + + if (opcode_class == library::OpcodeClassID::kInvalid) { + throw std::runtime_error( + "arg_as_OpcodeClassID() - illegal cast."); + } + } + else { + + throw std::runtime_error( + "arg_as_OpcodeClassID() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_OpcodeClassID( + library::OpcodeClassID &opcode_class, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_OpcodeClassID(opcode_class, value_ptr); +} + + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_SplitKModeID( + library::SplitKMode &split_k_mode, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + split_k_mode = library::from_string( + static_cast(value_ptr)->element); + + if (split_k_mode == library::SplitKMode::kInvalid) { + throw std::runtime_error( + "arg_as_SplitKModeID() - illegal cast."); + } + } + else { + + throw std::runtime_error( + "arg_as_SplitKModeID() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_SplitKModeID( + library::SplitKMode &split_k_mode, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_SplitKModeID(split_k_mode, value_ptr); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_ConvModeID( + library::ConvModeID &conv_mode, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + conv_mode = library::from_string( + static_cast(value_ptr)->element); + + if (conv_mode == library::ConvModeID::kInvalid) { + throw std::runtime_error( + "arg_as_ConvModeID() - illegal cast."); + } + } + else { + + throw std::runtime_error( + "arg_as_ConvModeID() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_ConvModeID( + library::ConvModeID &conv_mode, + char const *name, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + size_t idx = problem_space.argument_index(name); + KernelArgument::Value const *value_ptr = problem.at(idx).get(); + + return arg_as_ConvModeID(conv_mode, value_ptr); +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. +bool arg_as_ProviderID( + library::Provider &provider, + KernelArgument::Value const *value_ptr) { + + if (value_ptr->not_null) { + if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { + + provider = library::from_string( + static_cast(value_ptr)->element); + + if (provider == library::Provider::kInvalid) { + throw std::runtime_error( + "arg_as_ProviderID() - illegal cast."); + } + } + else { + + throw std::runtime_error( + "arg_as_ProviderID() - illegal cast."); + } + return true; + } + return false; +} + +/// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
+bool arg_as_ProviderID(
+  library::Provider &provider,
+  char const *name,
+  ProblemSpace const &problem_space,
+  ProblemSpace::Problem const &problem) {
+
+  size_t idx = problem_space.argument_index(name);
+  KernelArgument::Value const *value_ptr = problem.at(idx).get();
+
+  return arg_as_ProviderID(provider, value_ptr);
+}
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Lexically casts an argument to a given type stored in a byte array. Returns true if not null.
+bool arg_as_scalar(
+  std::vector<uint8_t> &bytes,
+  library::NumericTypeID numeric_type,
+  KernelArgument::Value const *value_ptr) {
+
+  if (value_ptr->not_null) {
+    if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) {
+      int64_t int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value;
+
+      // TODO - convert int64_t => destination type
+    }
+    else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) {
+      std::string const &str_value = static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value;
+
+      return lexical_cast(bytes, numeric_type, str_value);
+    }
+    else {
+      throw std::runtime_error(
+        "arg_as_int() - illegal cast. Problem space argument must be integer or scalar");
+    }
+
+    return true;
+  }
+
+  return false;
+}
+
+/// Lexically casts an argument to a given type and returns a byte array
+bool arg_as_scalar(
+  std::vector<uint8_t> &bytes,
+  library::NumericTypeID numeric_type,
+  char const *name,
+  ProblemSpace const &problem_space,
+  ProblemSpace::Problem const &problem) {
+
+  size_t idx = problem_space.argument_index(name);
+  KernelArgument::Value const *value_ptr = problem.at(idx).get();
+
+  return arg_as_scalar(bytes, numeric_type, value_ptr);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Returns true if a tensor description satisfies a `tensor` value
+bool tensor_description_satisfies(
+  library::TensorDescription const &tensor_desc,
+  TensorArgument::TensorValue const *value_ptr) {
+
+  if (value_ptr->not_null) {
+    if (value_ptr->desc.element != library::NumericTypeID::kUnknown &&
+      value_ptr->desc.element != tensor_desc.element) {
+
+      return false;
+    }
+
+    if (value_ptr->desc.layout != library::LayoutTypeID::kUnknown &&
+      value_ptr->desc.layout != tensor_desc.layout) {
+
+      return false;
+    }
+  }
+
+  return true;
+}
+
+/// Returns true if a tensor description satisfies a `tensor` value
+bool tensor_description_satisfies(
+  library::TensorDescription const &tensor_desc,
+  char const *name,
+  ProblemSpace const &problem_space,
+  ProblemSpace::Problem const &problem) {
+
+  size_t idx = problem_space.argument_index(name);
+  KernelArgument::Value const *value_ptr = problem.at(idx).get();
+
+  if (value_ptr->argument->description->type == ArgumentTypeID::kTensor) {
+    return tensor_description_satisfies(
+      tensor_desc,
+      static_cast<TensorArgument::TensorValue const *>(value_ptr));
+  }
+  else {
+    throw std::runtime_error("Kernel argument mismatch");
+  }
+
+  return false;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Returns true if conv_kind satisfies the value
+bool conv_kind_satisfies(
+  library::ConvKind const &conv_kind,
+  EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) {
+
+  if (value_ptr->not_null) {
+    library::ConvKind conv_kind_cmd_line =
+      library::from_string<library::ConvKind>(value_ptr->element);
+
+    if (conv_kind_cmd_line != library::ConvKind::kUnknown &&
+      conv_kind_cmd_line != conv_kind) {
+
+      return false;
+    }
+  }
+
+  return true;
+}
+
+/// Returns true if conv_kind satisfies the value
+bool conv_kind_satisfies(
+  library::ConvKind const &conv_kind,
+  char const *name,
+  ProblemSpace const &problem_space,
+  ProblemSpace::Problem const &problem) {
+
+  size_t idx = problem_space.argument_index(name);
+  KernelArgument::Value const *value_ptr = problem.at(idx).get();
+
+  if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
+    return conv_kind_satisfies(
+      conv_kind,
+      static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr));
+  }
+  else {
+    throw std::runtime_error("Kernel argument mismatch");
+  }
+
+  return false;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Returns true if a iterator algorithm satisfies the value
+bool iterator_algorithm_satisfies(
+  library::IteratorAlgorithmID const &iterator_algorithm,
+  EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) {
+
+  if (value_ptr->not_null) {
+    library::IteratorAlgorithmID iterator_algorithm_cmd_line =
+      library::from_string<library::IteratorAlgorithmID>(value_ptr->element);
+
+    if (iterator_algorithm_cmd_line != library::IteratorAlgorithmID::kNone &&
+      iterator_algorithm_cmd_line != iterator_algorithm) {
+
+      return false;
+    }
+  }
+
+  return true;
+}
+
+/// Returns true if a iterator algorithm satisfies the value
+bool iterator_algorithm_satisfies(
+  library::IteratorAlgorithmID const &iterator_algorithm,
+  char const *name,
+  ProblemSpace const &problem_space,
+  ProblemSpace::Problem const &problem) {
+
+  size_t idx = problem_space.argument_index(name);
+  KernelArgument::Value const *value_ptr = problem.at(idx).get();
+
+  if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
+    return iterator_algorithm_satisfies(
+      iterator_algorithm,
+      static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr));
+  }
+  else {
+    throw std::runtime_error("Kernel argument mismatch");
+  }
+
+  return false;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+} // namespace profiler
+} // namespace cutlass
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/rank_2k_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/rank_2k_operation_profiler.cu
new file mode 100644
index 0000000000000000000000000000000000000000..562992dcd36b585425e884b23f79ab5cb5184350
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/rank_2k_operation_profiler.cu
@@ -0,0 +1,732 @@
+/***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment + + +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/cublas_helpers.h" +#include "cutlass/profiler/rank_2k_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +Rank2KOperationProfiler::Rank2KOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kRank2K, + { + {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"}, + {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"}, + {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, + 
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, + {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"}, + {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"}, + }, + { library::Provider::kCUBLAS} + ) { + description_ = " Rank 2k Update. D = alpha * (A*B^T + B*A^T) + beta * C (symmetric) or D = alpha * (A*B^H+B*A^H) + beta * C (hermitian)"; +} + +/// Destructor +Rank2KOperationProfiler::~Rank2KOperationProfiler() { + +} + +/// Prints usage statement for the math function +void Rank2KOperationProfiler::print_usage(std::ostream &out) const { + out << "RankK" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void Rank2KOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size Syrk kernel:\n" + << " $ cutlass_profiler --operation=rank_2k --blas_mode=symmetric --n=1024 --k=128\n\n" + + << "Profile a particular problem size Herk kernel:\n" + << " $ cutlass_profiler --operation=rank_2k --blas_mode=hermitian --n=1024 --k=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=rank_2k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=rank_2k --accumulator-type=f16,f32\n\n" + + << "Schmoo over fill modees:\n" + << " $ cutlass_profiler --operation=rank_2k --fill_mode=lower/upper\n\n" + + << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use 
column, col, or n. For row major use, row or t):\n" + << " $ cutlass_profiler --operation=rank_2k --A=f16:column or --A=*:row\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=rank_2k --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=rank_2k --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=rank_2k --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=rank_2k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to rank_2k kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=rank_2k \\ \n" + << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +Status Rank2KOperationProfiler::RankKProblem::parse( + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->k, "k", problem_space, problem)) { + // default value + this->k = 1024; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) 
{ + // default value + this->split_k_slices = 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } + + if (this->split_k_slices > 1 && this->batch_count > 1) { + // At least one of these must be one + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->n), int(this->k)}).front(); + + this->ldb = DeviceAllocation::get_packed_layout( + operation_desc.B.layout, {int(this->n), int(this->k)}).front(); + + this->ldc = DeviceAllocation::get_packed_layout( + operation_desc.C.layout, {int(this->n), int(this->n)}).front(); + + return Status::kSuccess; +} + +/// Total number of bytes loaded +int64_t Rank2KOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const { + // Input bytes read and Output bytes written for the gemm problem + int64_t bytes = + 2 * int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + + 2 * int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k + + // Half matrix including the diagonal 
will have (N*(N+1))/2 elements + int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; + } + + bytes *= batch_count; + + return bytes; +} + +/// Total number of flops computed +int64_t Rank2KOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const { + + // FLOPs = 2 * n(n+1)k/2 [mma1] + 2 * n(n+1)k/2 [mma2] + 2 * n(n+1)/2 [epilogue] + // FLOPs = n(n+1)(2k + 1) + int64_t flops_ = n * (n + 1) * (2*k + 1); + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddComplexFastF32: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddGaussianComplex: + flops_ *= 3; + break; + + default: break; + } + + return flops_; +} + +/// Initializes a performance result +void Rank2KOperationProfiler::RankKProblem::initialize_result( + PerformanceResult &result, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "B", problem_space, + std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); + + set_argument(result, "C", problem_space, + std::string(library::to_string(operation_desc.C.element)) + ":" 
+ library::to_string(operation_desc.C.layout)); + + set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); + + set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode)); + + set_argument(result, "n", problem_space, n); + set_argument(result, "k", problem_space, k); + + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", problem_space, batch_count); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status Rank2KOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::RankKDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) { + return Status::kErrorInvalidProblem; + } + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + rank_k_workspace_.configuration.problem_size.m() = int(problem_.n); + rank_k_workspace_.configuration.problem_size.n() = int(problem_.n); + rank_k_workspace_.configuration.problem_size.k() = int(problem_.k); + rank_k_workspace_.configuration.lda = problem_.lda; + rank_k_workspace_.configuration.ldb = problem_.ldb; + rank_k_workspace_.configuration.ldc = problem_.ldc; + rank_k_workspace_.configuration.ldd = problem_.ldc; + //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); + 
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices); + + rank_k_workspace_.arguments.A = nullptr; + rank_k_workspace_.arguments.B = nullptr; + rank_k_workspace_.arguments.C = nullptr; + rank_k_workspace_.arguments.D = nullptr; + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments); +} + +/// Initializes the performance result +void Rank2KOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + + result.bytes = problem_.bytes(operation_desc); + result.flops = problem_.flops(operation_desc); + result.runtime = 0; + + +} + +/// Initializes workspace +Status Rank2KOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::RankKDescription const &operation_desc = + static_cast(operation->description()); + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 0; + rank_k_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.n), int(problem_.k)}, + {int(problem_.lda)}, + 
1, // batch_count + seed_shift++ + ); + + rank_k_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + {int(problem_.n), int(problem_.k)}, + {int(problem_.ldb)}, + 1, // batch_count + seed_shift++ + ); + + rank_k_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.ldc)}, + 1, // batch_count + seed_shift++ + ); + + rank_k_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.ldc)} + ); + + rank_k_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.ldc)} + ); + + rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data()); + rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data()); + } + + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration); + rank_k_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration); + rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = operation->initialize( + &rank_k_workspace_.configuration, + rank_k_workspace_.host_workspace.data(), + rank_k_workspace_.device_workspace.data()); + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = 
library::OperationKind::kRank2K; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool Rank2KOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing RankK arguments + rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); + rank_k_workspace_.arguments.B = rank_k_workspace_.B->data(); + rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); + rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run the CUTLASS operation + // + + results_.back().status = operation->run( + &rank_k_workspace_.arguments, + rank_k_workspace_.host_workspace.data(), + rank_k_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + 
+ if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUBLAS + if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { + + // Guard against unsupported cases + auto const & rank_k_desc = static_cast(operation->description()); + + if (cublas_satisfies(rank_k_desc) == Status::kSuccess) { + + // call cublas verification if supported + verify_with_cublas_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else { + // set verification map for cublas to not supported + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUBLAS + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool Rank2KOperationProfiler::verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + +#if CUTLASS_ENABLE_CUBLAS + + library::RankKDescription const &rank_k_desc = + static_cast(operation->description()); + + // + // Construct cuBLAS operators + // + + CublasCreate handle; + cublasStatus_t status = handle.get_cublas_create_status(); + + if (status != 
CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Initialize state + // + + try { + + // + // Construct dispatcher to cublasSyr2k() + // + + // Initialize structure containing RankK arguments + rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); + rank_k_workspace_.arguments.B = rank_k_workspace_.B->data(); + rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data(); + rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data(); + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + detail::cublasRankKDispatcher rank_k_op( + rank_k_desc, + rank_k_workspace_.configuration, + rank_k_workspace_.arguments + ); + + if (rank_k_op.status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; + return true; + } + + results_.back().status = Status::kSuccess; + + status = rank_k_op(handle); + + // Handle errors + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Verify results + // + + results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( + options, + *rank_k_workspace_.Computed, + *rank_k_workspace_.Reference + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + rank_k_desc, + library::Provider::kCUTLASS, + library::Provider::kCUBLAS); + } + } + catch (...) 
{ + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + } + +#endif + + // Return true means continue profiling + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool Rank2KOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing RankK arguments + rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); + rank_k_workspace_.arguments.B = rank_k_workspace_.B->data(); + rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); + rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &rank_k_workspace_.arguments, + rank_k_workspace_.host_workspace.data(), + rank_k_workspace_.device_workspace.data() + ); + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/rank_k_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/rank_k_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..ace8c2bbdc3dafa5dd8e3bb5187ae627f613e2dc --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/rank_k_operation_profiler.cu @@ -0,0 +1,718 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Execution environment + + +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/cublas_helpers.h" +#include "cutlass/profiler/rank_k_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +RankKOperationProfiler::RankKOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kRankK, + { + {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"}, + {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"}, + {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, + {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"}, + {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"}, + }, + { library::Provider::kCUBLAS} + ) { + description_ = " Rank-k Update. 
D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)"; +} + +/// Destructor +RankKOperationProfiler::~RankKOperationProfiler() { + +} + +/// Prints usage statement for the math function +void RankKOperationProfiler::print_usage(std::ostream &out) const { + out << "RankK" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void RankKOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size Syrk kernel:\n" + << " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n" + + << "Profile a particular problem size Herk kernel:\n" + << " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n" + + << "Schmoo over fill modees:\n" + << " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n" + + << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t):\n" + << " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=rank_k \\ \n" + << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +Status RankKOperationProfiler::RankKProblem::parse( + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->k, "k", problem_space, problem)) { + // default value + this->k = 1024; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + 
this->split_k_slices = 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } + + if (this->split_k_slices > 1 && this->batch_count > 1) { + // At least one of these must be one + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->n), int(this->k)}).front(); + + this->ldc = DeviceAllocation::get_packed_layout( + operation_desc.C.layout, {int(this->n), int(this->n)}).front(); + + return Status::kSuccess; +} + +/// Total number of bytes loaded +int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const { + // Input bytes read and Output bytes written for the gemm problem + int64_t bytes = + int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + + int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + + // Half matrix including the diagonal will have (N*(N+1))/2 elements + int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the 
gemm problem for non-zero beta values + if (!is_beta_zero) { + bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; + } + + bytes *= batch_count; + + return bytes; +} + +/// Total number of flops computed +int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const { + + // FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue] + // FLOPs = n(n+1)(k + 1) + int64_t flops_ = n * (n + 1) * (k + 1); + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddComplexFastF32: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddGaussianComplex: + flops_ *= 3; + break; + + default: break; + } + + return flops_; +} + +/// Initializes a performance result +void RankKOperationProfiler::RankKProblem::initialize_result( + PerformanceResult &result, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "C", problem_space, + std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); + + set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); + + set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode)); + + set_argument(result, "n", problem_space, n); + set_argument(result, "k", problem_space, k); + + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", 
problem_space, batch_count); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status RankKOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::RankKDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) { + return Status::kErrorInvalidProblem; + } + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + rank_k_workspace_.configuration.problem_size.m() = int(problem_.n); + rank_k_workspace_.configuration.problem_size.n() = int(problem_.n); + rank_k_workspace_.configuration.problem_size.k() = int(problem_.k); + rank_k_workspace_.configuration.lda = problem_.lda; + rank_k_workspace_.configuration.ldc = problem_.ldc; + rank_k_workspace_.configuration.ldd = problem_.ldc; + //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); + rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices); + + rank_k_workspace_.arguments.A = nullptr; + rank_k_workspace_.arguments.C = nullptr; + rank_k_workspace_.arguments.D = nullptr; + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return 
operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments); +} + +/// Initializes the performance result +void RankKOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::RankKDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + + result.bytes = problem_.bytes(operation_desc); + result.flops = problem_.flops(operation_desc); + + result.runtime = 0; + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + result.flops *= 4; + break; + + case library::MathOperationID::kMultiplyAddComplexFastF32: + result.flops *= 4; + break; + + default: break; + } + +} + +/// Initializes workspace +Status RankKOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::RankKDescription const &operation_desc = + static_cast(operation->description()); + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 0; + rank_k_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.n), int(problem_.k)}, + {int(problem_.lda)}, + 1, // batch_count + seed_shift++ + ); + + rank_k_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.ldc)}, + 1, // 
batch_count + seed_shift++ + ); + + rank_k_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.ldc)} + ); + + rank_k_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.ldc)} + ); + + rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data()); + rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data()); + } + + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration); + rank_k_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration); + rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = operation->initialize( + &rank_k_workspace_.configuration, + rank_k_workspace_.host_workspace.data(), + rank_k_workspace_.device_workspace.data()); + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kRankK; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool RankKOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + 
DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing RankK arguments + rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); + rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); + rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run the CUTLASS operation + // + + results_.back().status = operation->run( + &rank_k_workspace_.arguments, + rank_k_workspace_.host_workspace.data(), + rank_k_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUBLAS + if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { + + // Guard against unsupported cases + auto const & rank_k_desc = static_cast(operation->description()); + + if (cublas_satisfies(rank_k_desc) == Status::kSuccess) { + + // call cublas verification if supported + verify_with_cublas_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else { + // set verification map for cublas to not supported + 
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUBLAS + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool RankKOperationProfiler::verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + +#if CUTLASS_ENABLE_CUBLAS + + library::RankKDescription const &rank_k_desc = + static_cast(operation->description()); + + // + // Construct cuBLAS operators + // + + CublasCreate handle; + cublasStatus_t status = handle.get_cublas_create_status(); + + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Initialize state + // + + try { + + // + // Construct dispatcher to cublasSyrk() + // + + // Initialize structure containing RankK arguments + rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); + rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data(); + rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data(); + rank_k_workspace_.arguments.alpha = 
problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + detail::cublasRankKDispatcher rank_k_op( + rank_k_desc, + rank_k_workspace_.configuration, + rank_k_workspace_.arguments + ); + + if (rank_k_op.status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; + return true; + } + + results_.back().status = Status::kSuccess; + + status = rank_k_op(handle); + + // Handle errors + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Verify results + // + + results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( + options, + *rank_k_workspace_.Computed, + *rank_k_workspace_.Reference + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + rank_k_desc, + library::Provider::kCUTLASS, + library::Provider::kCUBLAS); + } + } + catch (...) 
{ + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + } + +#endif + + // Return true means continue profiling + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool RankKOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing RankK arguments + rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); + rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); + rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); + rank_k_workspace_.arguments.alpha = problem_.alpha.data(); + rank_k_workspace_.arguments.beta = problem_.beta.data(); + rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &rank_k_workspace_.arguments, + rank_k_workspace_.host_workspace.data(), + rank_k_workspace_.device_workspace.data() + ); + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f150a6020a8a062fff7924229f9715fb288efc4 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu @@ -0,0 +1,577 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Execution environment + +*/ + +#include +#include +#include +#include + +#include "cutlass/profiler/cublas_helpers.h" +#include "cutlass/profiler/sparse_gemm_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kSparseGemm, + { + {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. sparse, ...)"}, + {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, + {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, + {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, + {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, + {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"}, + } + ) { + + description_ = " Structured sparse GEMM. 
D = alpha * A*B + beta * C"; +} + +/// Destructor +SparseGemmOperationProfiler::~SparseGemmOperationProfiler() { + +} + +/// Prints usage statement for the math function +void SparseGemmOperationProfiler::print_usage(std::ostream &out) const { + out << "Sparse GEMM" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void SparseGemmOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size:\n" + << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 --k=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=SparseGemm --accumulator-type=f16,f32\n\n" + + << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n" + << " $ cutlass_profiler --operation=SparseGemm --A=f16:column --B=*:row\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=SparseGemm --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=SparseGemm --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=SparseGemm --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=SparseGemm \\ \n" + << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " 
--k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Status SparseGemmOperationProfiler::SparseGemmProblem::parse( + library::SparseGemmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!arg_as_int(this->m, "m", problem_space, problem)) { + // default value + this->m = 1024; + } + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->k, "k", problem_space, problem)) { + // default value + this->k = 1024; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + this->split_k_slices = 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return 
Status::kErrorInternal; + } + } + + this->elements_per_128b = + 128 / library::sizeof_bits(operation_desc.A.element); + + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, + {int(this->m), int(this->k) / int(this->sparse)}) + .front(); + + this->ldb = DeviceAllocation::get_packed_layout( + operation_desc.B.layout, {int(this->k), int(this->n)}).front(); + + this->ldc = DeviceAllocation::get_packed_layout( + operation_desc.C.layout, {int(this->m), int(this->n)}).front(); + + this->lde = + DeviceAllocation::get_packed_layout( + operation_desc.E.layout, + {int(this->m), int(this->k / this->sparse / this->elements_per_128b)}) + .front(); + + return Status::kSuccess; +} + +/// Initializes a performance result +void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result( + PerformanceResult &result, + library::SparseGemmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "B", problem_space, + std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); + + set_argument(result, "C", problem_space, + std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); + + set_argument(result, "E", problem_space, + std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout)); + + set_argument(result, "m", problem_space, m); + set_argument(result, "n", problem_space, n); + set_argument(result, "k", problem_space, k); + + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", problem_space, 
batch_count); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +/// Extracts the problem dimensions +Status SparseGemmOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::SparseGemmDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.gemm_kind != library::GemmKind::kSparse) { + return Status::kErrorInvalidProblem; + } + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + gemm_workspace_.configuration.problem_size.m() = int(problem_.m); + gemm_workspace_.configuration.problem_size.n() = int(problem_.n); + gemm_workspace_.configuration.problem_size.k() = int(problem_.k); + gemm_workspace_.configuration.lda = problem_.lda; + gemm_workspace_.configuration.ldb = problem_.ldb; + gemm_workspace_.configuration.ldc = problem_.ldc; + gemm_workspace_.configuration.ldd = problem_.ldc; + gemm_workspace_.configuration.lde = problem_.lde; + + gemm_workspace_.arguments.A = nullptr; + gemm_workspace_.arguments.B = nullptr; + gemm_workspace_.arguments.C = nullptr; + gemm_workspace_.arguments.D = nullptr; + gemm_workspace_.arguments.E = nullptr; + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); +} + +/// Initializes the performance result +void 
SparseGemmOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::SparseGemmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + // Input bytes read and Output bytes written for the gemm problem + result.bytes = + int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * + problem_.k / problem_.sparse + + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) * + problem_.k + + int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * + problem_.n + + int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) * + problem_.k / problem_.sparse / problem_.elements_per_128b; + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; + } + + result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n); + result.runtime = 0; + +} + +/// Initializes workspace +Status SparseGemmOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::SparseGemmDescription const &operation_desc = + static_cast(operation->description()); + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 
0; + gemm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.m), int(problem_.k) / int(problem_.sparse)}, + {int(problem_.lda)}, + 1, // batch_count + seed_shift++ + ); + + gemm_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + {int(problem_.k), int(problem_.n)}, + {int(problem_.ldb)}, + 1, // batch_count + seed_shift++ + ); + + gemm_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)}, + 1, // batch_count + seed_shift++ + ); + + gemm_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + gemm_workspace_.E = device_context.allocate_sparsemeta_tensor( + options, + "E", + operation_desc.E.element, + operation_desc.E.layout, + operation_desc.A.element, + {int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)}, + {int(problem_.lde)}, + 1, // batch_count + seed_shift++ + ); + + gemm_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data()); + } + + // + // Initialize the CUTLASS operation + // + + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = operation->get_host_workspace_size(&gemm_workspace_.configuration); + gemm_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = operation->get_device_workspace_size(&gemm_workspace_.configuration); + 
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = operation->initialize( + &gemm_workspace_.configuration, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data()); + } + + // + // If CUTLASS is enabled, generate a result for it + // + + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kSparseGemm; + results_.back().disposition = Disposition::kNotRun; + + for(auto &verification_provider : options.verification.providers) { + results_.back().verification_map[verification_provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool SparseGemmOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.C->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); + gemm_workspace_.arguments.E = gemm_workspace_.E->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run the CUTLASS operation + // + + results_.back().status = operation->run( + &gemm_workspace_.arguments, + gemm_workspace_.host_workspace.data(), + 
gemm_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool SparseGemmOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.C->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); + gemm_workspace_.arguments.E = 
gemm_workspace_.E->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &gemm_workspace_.arguments, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data() + ); + } + + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/symm_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/symm_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..1364c46637e6506b7a880a305863924f7bd511b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/symm_operation_profiler.cu @@ -0,0 +1,769 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment + + +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/cublas_helpers.h" +#include "cutlass/profiler/symm_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +SymmOperationProfiler::SymmOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kSymm, + { + {ArgumentTypeID::kEnumerated, {"symm_kind"}, "Variant of Symm (universal)"}, + {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the Symm problem space"}, + {ArgumentTypeID::kInteger, {"n", 
"problem-size::n"}, "N dimension of the Symm problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, + {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, + {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for Symm kernel (left or right)"}, + {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for Symm kernel (lower or upper)"}, + {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for Symm kernel (symmetric or hermitian)"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of Symm computed in one batch"}, + }, + { library::Provider::kCUBLAS } + ) { + description_ = " Symmetric Matrix-Matrix Multiplication. 
D = alpha * A * B OR alpha * B * A + beta * C (where A is symmetric/hermitian)"; +} + +/// Destructor +SymmOperationProfiler::~SymmOperationProfiler() { + +} + +/// Prints usage statement for the math function +void SymmOperationProfiler::print_usage(std::ostream &out) const { + out << "Symm" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void SymmOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size SYMM kernel:\n" + << " $ cutlass_profiler --operation=Symm --blas_mode=symmetric --m=1024 --n=128\n\n" + + << "Profile a particular problem size HEMM kernel:\n" + << " $ cutlass_profiler --operation=Symm --blas_mode=hermitian --m=1024 --n=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=Symm --m=1024:4096:256 --n=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=Symm --accumulator-type=f16,f32\n\n" + + << "Schmoo over side modees:\n" + << " $ cutlass_profiler --operation=Symm --side_mode=left/right\n\n" + + << "Schmoo over fill modees:\n" + << " $ cutlass_profiler --operation=Symm --fill_mode=lower/upper\n\n" + + << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t):\n" + << " $ cutlass_profiler --operation=Symm --A=f16:column or --A=*:row\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=Symm --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=Symm --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=Symm --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=Symm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to symm kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=Symm \\ \n" + << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --n=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +Status SymmOperationProfiler::SymmProblem::parse( + library::SymmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!arg_as_int(this->m, "m", problem_space, problem)) { + // default value + this->m = 1024; + } + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + this->split_k_slices 
= 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } + + if (this->split_k_slices > 1 && this->batch_count > 1) { + // At least one of these must be one + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + if (operation_desc.side_mode == SideMode::kLeft) { + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->m), int(this->m)}).front(); + } + else if (operation_desc.side_mode == SideMode::kRight) { + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->n), int(this->n)}).front(); + } + + this->ldb = DeviceAllocation::get_packed_layout( + operation_desc.B.layout, {int(this->m), int(this->n)}).front(); + + this->ldc = DeviceAllocation::get_packed_layout( + operation_desc.C.layout, {int(this->m), int(this->n)}).front(); + + return Status::kSuccess; +} + +/// Total number of bytes loaded +int64_t SymmOperationProfiler::SymmProblem::bytes(library::SymmDescription const &operation_desc) const { + int64_t bytes; + // Input bytes read and Output bytes written for the gemm problem + // 
Half matrix including the diagonal will have (X*(X+1))/2 elements + if (operation_desc.side_mode == SideMode::kLeft) { + bytes = + int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * (m + 1) / 2 + + int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n + + int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n; + } else if (operation_desc.side_mode == SideMode::kRight) { + bytes = + int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * (n + 1) / 2 + + int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n + + int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n; + } + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n; + } + + bytes *= batch_count; + + return bytes; +} + +/// Total number of flops computed +int64_t SymmOperationProfiler::SymmProblem::flops(library::SymmDescription const &operation_desc) const { + + // FLOPs for first TRMM kernel (with diagonal) = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero + // FLOPs for second TRMM kernel (with diagonal) = 2 * [ ( M * (M-1)/2 * N ) ] // Beta is zero + // FLOPs = m*(m+1)*n [mma1] + m*(m-1)*n [mma2] + 2*m*n [epilogue] + // FLOPs = 2*m*n(m+1) for left side mode + // FLOPs can also be calculated to be same as GEMM with correct value for 'k' as below. + int64_t k = (operation_desc.side_mode == SideMode::kLeft) ? 
int64_t(m) : int64_t(n); + int64_t flops_ = (int64_t(m) * n * k + m * n) * 2; + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddComplexFastF32: + flops_ *= 4; + break; + + case library::MathOperationID::kMultiplyAddGaussianComplex: + flops_ *= 3; + break; + + default: break; + } + + return flops_; +} + +/// Initializes a performance result +void SymmOperationProfiler::SymmProblem::initialize_result( + PerformanceResult &result, + library::SymmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "symm_kind", problem_space, library::to_string(operation_desc.symm_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "B", problem_space, + std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); + + set_argument(result, "C", problem_space, + std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); + + set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode)); + + set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); + + set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode)); + + set_argument(result, "m", problem_space, m); + set_argument(result, "n", problem_space, n); + + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", problem_space, batch_count); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + 
set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status SymmOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::SymmDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.symm_kind != library::SymmKind::kUniversal) { + return Status::kErrorInvalidProblem; + } + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + symm_workspace_.configuration.problem_size.m() = int(problem_.m); + symm_workspace_.configuration.problem_size.n() = int(problem_.n); + symm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft) + ? 
int(problem_.m) : int(problem_.n); + symm_workspace_.configuration.lda = problem_.lda; + symm_workspace_.configuration.ldb = problem_.ldb; + symm_workspace_.configuration.ldc = problem_.ldc; + symm_workspace_.configuration.ldd = problem_.ldc; + //symm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); + symm_workspace_.configuration.batch_count = int(problem_.split_k_slices); + + symm_workspace_.arguments.A = nullptr; + symm_workspace_.arguments.B = nullptr; + symm_workspace_.arguments.C = nullptr; + symm_workspace_.arguments.D = nullptr; + symm_workspace_.arguments.alpha = problem_.alpha.data(); + symm_workspace_.arguments.beta = problem_.beta.data(); + symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&symm_workspace_.configuration, &symm_workspace_.arguments); +} + +/// Initializes the performance result +void SymmOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::SymmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + + result.bytes = problem_.bytes(operation_desc); + result.flops = problem_.flops(operation_desc); + result.runtime = 0; + + +} + +/// Initializes workspace +Status SymmOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::SymmDescription const &operation_desc = + 
static_cast(operation->description()); + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 0; + if (operation_desc.side_mode == SideMode::kLeft) { + symm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.m), int(problem_.m)}, + {int(problem_.lda)}, + 1, // batch_count + seed_shift++ + ); + } else if (operation_desc.side_mode == SideMode::kRight) { + symm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.lda)}, + 1, // batch_count + seed_shift++ + ); + } + + symm_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldb)}, + 1, // batch_count + seed_shift++ + ); + + symm_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)}, + 1, // batch_count + seed_shift++ + ); + + symm_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + symm_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + symm_workspace_.Computed->copy_from_device(symm_workspace_.C->data()); + symm_workspace_.Reference->copy_from_device(symm_workspace_.C->data()); + } + + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = 
operation->get_host_workspace_size(&symm_workspace_.configuration); + symm_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = operation->get_device_workspace_size(&symm_workspace_.configuration); + symm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = operation->initialize( + &symm_workspace_.configuration, + symm_workspace_.host_workspace.data(), + symm_workspace_.device_workspace.data()); + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kSymm; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool SymmOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing Symm arguments + symm_workspace_.arguments.A = symm_workspace_.A->data(); + symm_workspace_.arguments.B = symm_workspace_.B->data(); + symm_workspace_.arguments.C = symm_workspace_.C->data(); + symm_workspace_.arguments.D = symm_workspace_.Computed->data(); + symm_workspace_.arguments.alpha = problem_.alpha.data(); + symm_workspace_.arguments.beta = problem_.beta.data(); + symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run the CUTLASS operation + // + + 
results_.back().status = operation->run( + &symm_workspace_.arguments, + symm_workspace_.host_workspace.data(), + symm_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUBLAS + if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { + + // Guard against unsupported cases + auto const & symm_desc = static_cast(operation->description()); + + if (cublas_satisfies(symm_desc) == Status::kSuccess) { + + // call cublas verification if supported + verify_with_cublas_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else { + // set verification map for cublas to not supported + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUBLAS + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool SymmOperationProfiler::verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + +#if CUTLASS_ENABLE_CUBLAS + + library::SymmDescription const &symm_desc = + static_cast(operation->description()); + + // + // Construct cuBLAS operators + // + + CublasCreate handle; + cublasStatus_t status = handle.get_cublas_create_status(); + + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Initialize state + // + + try { + + // + // Construct dispatcher to cublasSymm() + // + + // Initialize structure containing Symm arguments + symm_workspace_.arguments.A = symm_workspace_.A->data(); + symm_workspace_.arguments.B = symm_workspace_.B->data(); + symm_workspace_.arguments.C = symm_workspace_.Reference->data(); + symm_workspace_.arguments.D = symm_workspace_.Reference->data(); + symm_workspace_.arguments.alpha = problem_.alpha.data(); + symm_workspace_.arguments.beta = problem_.beta.data(); + symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + detail::cublasSymmDispatcher symm_op( + symm_desc, + symm_workspace_.configuration, + symm_workspace_.arguments + ); + + if (symm_op.status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; + return true; + } + + results_.back().status = Status::kSuccess; + + status = symm_op(handle); + + // Handle errors + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Verify results + // + + 
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( + options, + *symm_workspace_.Computed, + *symm_workspace_.Reference + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + symm_desc, + library::Provider::kCUTLASS, + library::Provider::kCUBLAS); + } + } + catch (...) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + } + +#endif + + // Return true means continue profiling + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool SymmOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing Symm arguments + symm_workspace_.arguments.A = symm_workspace_.A->data(); + symm_workspace_.arguments.B = symm_workspace_.B->data(); + symm_workspace_.arguments.C = symm_workspace_.C->data(); + symm_workspace_.arguments.D = symm_workspace_.Computed->data(); + symm_workspace_.arguments.alpha = problem_.alpha.data(); + symm_workspace_.arguments.beta = problem_.beta.data(); + symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &symm_workspace_.arguments, + symm_workspace_.host_workspace.data(), + symm_workspace_.device_workspace.data() + ); + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace 
cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/trmm_operation_profiler.cu b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/trmm_operation_profiler.cu new file mode 100644 index 0000000000000000000000000000000000000000..7d61f79f7b4848ec24502c9ecb9c751f0ec9968c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/profiler/src/trmm_operation_profiler.cu @@ -0,0 +1,708 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Execution environment + + +*/ + +#include +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cutlass/profiler/cublas_helpers.h" +#include "cutlass/profiler/trmm_operation_profiler.h" +#include "cutlass/profiler/gpu_timer.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +TrmmOperationProfiler::TrmmOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kTrmm, + { + {ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"}, + {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"}, + {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"}, + {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"}, + {ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"}, + {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, + 
{ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"}, + }, + { library::Provider::kCUBLAS} + ) { + description_ = " Triangular Matrix-Multiplication. D = alpha * A * B or alpha * B * A"; +} + +/// Destructor +TrmmOperationProfiler::~TrmmOperationProfiler() { + +} + +/// Prints usage statement for the math function +void TrmmOperationProfiler::print_usage(std::ostream &out) const { + out << "TRMM" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void TrmmOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size:\n" + << " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n" + + << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t):\n" + << " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=Trmm \\ \n" + << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +Status TrmmOperationProfiler::TrmmProblem::parse( + library::TrmmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!arg_as_int(this->m, "m", problem_space, problem)) { + // default value + this->m = 1024; + } + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + this->split_k_slices 
= 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } + + if (this->split_k_slices > 1 && this->batch_count > 1) { + // At least one of these must be one + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + if (operation_desc.side_mode == SideMode::kLeft) { + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->m), int(this->m)}).front(); + } + else if (operation_desc.side_mode == SideMode::kRight) { + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->n), int(this->n)}).front(); + } + + this->ldb = DeviceAllocation::get_packed_layout( + operation_desc.B.layout, {int(this->m), int(this->n)}).front(); + + this->ldd = DeviceAllocation::get_packed_layout( + operation_desc.D.layout, {int(this->m), int(this->n)}).front(); + + return Status::kSuccess; +} + +/// Initializes a performance result +void TrmmOperationProfiler::TrmmProblem::initialize_result( + PerformanceResult &result, + library::TrmmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + 
result.arguments.resize(problem_space.rank()); + + set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode)); + + set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); + + set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type)); + + set_argument(result, "B", problem_space, + std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); + + set_argument(result, "D", problem_space, + std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout)); + + set_argument(result, "m", problem_space, m); + set_argument(result, "n", problem_space, n); + + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", problem_space, batch_count); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status TrmmOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::TrmmDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) { + return Status::kErrorInvalidProblem; + 
} + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + trmm_workspace_.configuration.problem_size.m() = int(problem_.m); + trmm_workspace_.configuration.problem_size.n() = int(problem_.n); + trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft) + ? int(problem_.m) : int(problem_.n); + trmm_workspace_.configuration.lda = problem_.lda; + trmm_workspace_.configuration.ldb = problem_.ldb; + trmm_workspace_.configuration.ldd = problem_.ldd; + //trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); + trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices); + + trmm_workspace_.arguments.A = nullptr; + trmm_workspace_.arguments.B = nullptr; + trmm_workspace_.arguments.D = nullptr; + trmm_workspace_.arguments.alpha = problem_.alpha.data(); + trmm_workspace_.arguments.beta = problem_.beta.data(); + trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments); +} + +/// Initializes the performance result +void TrmmOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::TrmmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + if (operation_desc.side_mode == SideMode::kLeft) { + // Input bytes read and Output bytes written for the trmm problem + result.bytes = + // Half matrix including the diagonal will have (M*(M+1))/2 
elements + int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 + + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; + } else if (operation_desc.side_mode == SideMode::kRight) { + // Input bytes read and Output bytes written for the trmm problem + result.bytes = + // Half matrix including the diagonal will have (N*(N+1))/2 elements + int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 + + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; + } + + // FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero + result.flops = problem_.m * (problem_.m + 1) * problem_.n; + + result.runtime = 0; + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + result.flops *= 4; + break; + + case library::MathOperationID::kMultiplyAddComplexFastF32: + result.flops *= 4; + break; + + default: break; + } + +} + +/// Initializes workspace +Status TrmmOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::TrmmDescription const &operation_desc = + static_cast(operation->description()); + + if (options.execution_mode != ExecutionMode::kDryRun) { + int seed_shift = 0; + if (operation_desc.side_mode == SideMode::kLeft) { + trmm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.m), int(problem_.m)}, + {int(problem_.lda)}, + 1, // batch_count + seed_shift++ + ); + } else 
if (operation_desc.side_mode == SideMode::kRight) { + trmm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.n), int(problem_.n)}, + {int(problem_.lda)}, + 1, // batch_count + seed_shift++ + ); + } + + trmm_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldb)}, + 1, // batch_count + seed_shift++ + ); + + trmm_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.D.element, + operation_desc.D.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldd)} + ); + + trmm_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.D.element, + operation_desc.D.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldd)} + ); + + } + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration); + trmm_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration); + trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = operation->initialize( + &trmm_workspace_.configuration, + trmm_workspace_.host_workspace.data(), + trmm_workspace_.device_workspace.data()); + } + + // + // If CUTLASS is enabled, generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kTrmm; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = 
Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool TrmmOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing TRMM arguments + trmm_workspace_.arguments.A = trmm_workspace_.A->data(); + trmm_workspace_.arguments.B = trmm_workspace_.B->data(); + trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); + trmm_workspace_.arguments.alpha = problem_.alpha.data(); + trmm_workspace_.arguments.beta = problem_.beta.data(); + trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run the CUTLASS operation + // + + results_.back().status = operation->run( + &trmm_workspace_.arguments, + trmm_workspace_.host_workspace.data(), + trmm_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUBLAS + if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { + + // Guard against unsupported cases + auto const & trmm_desc = static_cast(operation->description()); + + if 
(cublas_satisfies(trmm_desc) == Status::kSuccess) { + + // call cublas verification if supported + verify_with_cublas_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else { + // set verification map for cublas to not supported + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUBLAS + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool TrmmOperationProfiler::verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + +#if CUTLASS_ENABLE_CUBLAS + + library::TrmmDescription const &trmm_desc = + static_cast(operation->description()); + + // + // Construct cuBLAS operators + // + + CublasCreate handle; + cublasStatus_t status = handle.get_cublas_create_status(); + + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Initialize state + // + + try { + + // + // Construct dispatcher to cublasTrmm() + // + + // Initialize structure containing 
TRMM arguments + trmm_workspace_.arguments.A = trmm_workspace_.A->data(); + trmm_workspace_.arguments.B = trmm_workspace_.B->data(); + trmm_workspace_.arguments.D = trmm_workspace_.Reference->data(); + trmm_workspace_.arguments.alpha = problem_.alpha.data(); + trmm_workspace_.arguments.beta = problem_.beta.data(); + trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + detail::cublasTrmmDispatcher trmm_op( + trmm_desc, + trmm_workspace_.configuration, + trmm_workspace_.arguments + ); + + if (trmm_op.status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; + return true; + } + + results_.back().status = Status::kSuccess; + + status = trmm_op(handle); + + // Handle errors + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Verify results + // + results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( + options, + *trmm_workspace_.Computed, + *trmm_workspace_.Reference + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + trmm_desc, + library::Provider::kCUTLASS, + library::Provider::kCUBLAS); + } + } + catch (...) 
{ + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + } + +#endif + + // Return true means continue profiling + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool TrmmOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing TRMM arguments + trmm_workspace_.arguments.A = trmm_workspace_.A->data(); + trmm_workspace_.arguments.B = trmm_workspace_.B->data(); + trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); + trmm_workspace_.arguments.alpha = problem_.alpha.data(); + trmm_workspace_.arguments.beta = problem_.beta.data(); + trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &trmm_workspace_.arguments, + trmm_workspace_.host_workspace.data(), + trmm_workspace_.device_workspace.data() + ); + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/CMakeLists.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc96016e4c392bbd13659d5ec47152b2894654c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/CMakeLists.txt @@ -0,0 +1,55 @@ +# Copyright (c) 2017 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+cmake_policy(SET CMP0112 NEW) +add_library(cutlass_tools_util_includes INTERFACE) +add_library(nvidia::cutlass::tools::util ALIAS cutlass_tools_util_includes) +set_target_properties(cutlass_tools_util_includes PROPERTIES EXPORT_NAME tools::util) + +target_include_directories( + cutlass_tools_util_includes + INTERFACE + $ + $ + ) + +target_link_libraries( + cutlass_tools_util_includes + INTERFACE + $<$:cublas> + ) + +install( + DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/ + ) + +install( + TARGETS cutlass_tools_util_includes + EXPORT NvidiaCutlass + ) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/GPU_Clock.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/GPU_Clock.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5f2dd4bd14c8fc1dee8d6d7265661ceb2d3850b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/GPU_Clock.hpp @@ -0,0 +1,67 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include + +struct GPU_Clock +{ + GPU_Clock() { + cudaEventCreate(&start_); + cudaEventCreate(&stop_); + cudaEventRecord(start_); + } + + ~GPU_Clock() { + cudaEventDestroy(start_); + cudaEventDestroy(stop_); + } + + void start() { + cudaEventRecord(start_); + } + + float milliseconds() { + cudaEventRecord(stop_); + cudaEventSynchronize(stop_); + float time; + cudaEventElapsedTime(&time, start_, stop_); + return time; + } + + float seconds() { + return milliseconds() * float(1e-3); + } + + private: + cudaEvent_t start_, stop_; +}; diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/command_line.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/command_line.h new file mode 100644 index 0000000000000000000000000000000000000000..9b6738d914f861c4c34cb14f8c8f6fa3426884b6 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/command_line.h @@ -0,0 +1,313 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +/** + * \file + * Utility for parsing command line arguments + */ + +#include +#include +#include +#include +#include + +#include + +#include "cutlass/cutlass.h" + +namespace cutlass { + +/****************************************************************************** + * command_line + ******************************************************************************/ + +/** + * Utility for parsing command line arguments + */ +struct CommandLine { + std::vector keys; + std::vector values; + std::vector args; + + /** + * Constructor + */ + CommandLine(int argc, const char** argv) { + using namespace std; + + for (int i = 1; i < argc; i++) { + string arg = argv[i]; + + if ((arg[0] != '-') || (arg[1] != '-')) { + args.push_back(arg); + continue; + } + + string::size_type pos; + string key, val; + if ((pos = arg.find('=')) == string::npos) { + key = string(arg, 2, arg.length() - 2); + val = ""; + } else { + key = string(arg, 2, pos - 2); + val = string(arg, pos + 1, arg.length() - 1); + } + + keys.push_back(key); + values.push_back(val); + } + } + + /** + * Checks whether a flag "--" is present in the commandline + */ + bool check_cmd_line_flag(const char* arg_name) const { + using namespace std; + + for (int i = 0; i < int(keys.size()); ++i) { + if (keys[i] == string(arg_name)) return true; + } + return false; + } + + /** + * Returns number of naked (non-flag and non-key-value) commandline parameters + */ + size_t num_naked_args() const { + return args.size(); + } + + /** + * Print naked (non-flag and non-key-value) commandline parameters + */ + void print_naked_args(std::ostream &out) const { + for (auto arg : args) { + out << " " << arg <<"\n"; + } + } + + /** + * Returns the commandline parameter for a given index (not including flags) + */ + template + void get_cmd_line_argument(int index, value_t& val) const { + using namespace std; + if (index < args.size()) { + 
istringstream str_stream(args[index]); + str_stream >> val; + } + } + + /** + * Obtains the boolean value specified for a given commandline parameter --= + */ + void get_cmd_line_argument(const char* arg_name, bool& val, bool _default) const { + val = _default; + if (check_cmd_line_flag(arg_name)) { + std::string value; + get_cmd_line_argument(arg_name, value); + + val = !(value == "0" || value == "false"); + } + } + + /** + * Obtains the value specified for a given commandline parameter --= + */ + template + void get_cmd_line_argument(const char* arg_name, + value_t& val) const { + + get_cmd_line_argument(arg_name, val, val); + } + + /** + * Obtains the value specified for a given commandline parameter --= + */ + template + void get_cmd_line_argument(const char* arg_name, + value_t& val, + value_t const& _default) const { + using namespace std; + + val = _default; + + for (int i = 0; i < int(keys.size()); ++i) { + if (keys[i] == string(arg_name)) { + istringstream str_stream(values[i]); + str_stream >> val; + } + } + } + + /** + * Returns the values specified for a given commandline parameter --=,* + */ + template + void get_cmd_line_arguments(const char* arg_name, + std::vector& vals, + char sep = ',') const { + using namespace std; + + if (check_cmd_line_flag(arg_name)) { + // Clear any default values + vals.clear(); + + // Recover from multi-value string + for (int i = 0; i < keys.size(); ++i) { + if (keys[i] == string(arg_name)) { + string val_string(values[i]); + separate_string(val_string, vals, sep); + } + } + } + } + + /** + * Returns the values specified for a given commandline parameter + * --=,* + */ + void get_cmd_line_argument_pairs(const char* arg_name, + std::vector >& tokens, + char delim = ',', + char sep = ':') const { + if (check_cmd_line_flag(arg_name)) { + std::string value; + get_cmd_line_argument(arg_name, value); + + tokenize(tokens, value, delim, sep); + } + } + + /** + * Returns a list of ranges specified for a given commandline parameter 
+ * --=,* + */ + void get_cmd_line_argument_ranges(const char* arg_name, + std::vector >& vals, + char delim = ',', + char sep = ':') const { + std::vector ranges; + get_cmd_line_arguments(arg_name, ranges, delim); + + for (std::vector::const_iterator range = ranges.begin(); + range != ranges.end(); ++range) { + + std::vector range_vals; + separate_string(*range, range_vals, sep); + vals.push_back(range_vals); + } + } + + /** + * The number of pairs parsed + */ + int parsed_argc() const { return (int)keys.size(); } + + //------------------------------------------------------------------------- + // Utility functions + //------------------------------------------------------------------------- + + /// Tokenizes a comma-delimited list of string pairs delimited by ':' + static void tokenize(std::vector >& tokens, + std::string const& str, + char delim = ',', + char sep = ':') { + // Home-built to avoid Boost dependency + size_t s_idx = 0; + size_t d_idx = std::string::npos; + while (s_idx < str.size()) { + d_idx = str.find_first_of(delim, s_idx); + + size_t end_idx = (d_idx != std::string::npos ? 
d_idx : str.size()); + size_t sep_idx = str.find_first_of(sep, s_idx); + size_t offset = 1; + if (sep_idx == std::string::npos || sep_idx >= end_idx) { + sep_idx = end_idx; + offset = 0; + } + + std::pair item( + str.substr(s_idx, sep_idx - s_idx), + str.substr(sep_idx + offset, end_idx - sep_idx - offset)); + + tokens.push_back(item); + s_idx = end_idx + 1; + } + } + + /// Tokenizes a comma-delimited list of string pairs delimited by ':' + static void tokenize(std::vector& tokens, + std::string const& str, + char delim = ',', + char sep = ':') { + typedef std::vector > TokenVector; + typedef TokenVector::const_iterator token_iterator; + + std::vector > token_pairs; + tokenize(token_pairs, str, delim, sep); + for (token_iterator tok = token_pairs.begin(); tok != token_pairs.end(); ++tok) { + tokens.push_back(tok->first); + } + } + + template + static void separate_string(std::string const& str, + std::vector& vals, + char sep = ',') { + std::istringstream str_stream(str); + std::string::size_type old_pos = 0; + std::string::size_type new_pos = 0; + + // Iterate -delimited values + value_t val; + while ((new_pos = str.find(sep, old_pos)) != std::string::npos) { + if (new_pos != old_pos) { + str_stream.width(new_pos - old_pos); + str_stream >> val; + vals.push_back(val); + } + + // skip over delimiter + str_stream.ignore(1); + old_pos = new_pos + 1; + } + + // Read last value + str_stream >> val; + vals.push_back(val); + } +}; + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp new file mode 100644 index 0000000000000000000000000000000000000000..82d56fa18faedcaa3ab8e27ade29ad8810715fac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp @@ -0,0 +1,526 @@ 
+/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +#include +#include + +//-- BLAM_DEBUG_OUT --------------------------------------------------------- +#ifdef BLAM_DEBUG +# include +# ifndef BLAM_DEBUG_OUT +# define BLAM_DEBUG_OUT(msg) std::cerr << "BLAM: " << msg << std::endl +# define BLAM_DEBUG_OUT_2(msg) std::cerr << msg << std::endl +# endif // BLAM_DEBUG_OUT +#else +# ifndef BLAM_DEBUG_OUT +# define BLAM_DEBUG_OUT(msg) +# define BLAM_DEBUG_OUT_2(msg) +# endif // BLAM_DEBUG_OUT +#endif // BLAM_DEBUG + +// User could potentially define ComplexFloat/ComplexDouble instead of std:: +#ifndef BLAM_COMPLEX_TYPES +#define BLAM_COMPLEX_TYPES 1 +#include +namespace blam { +template +using Complex = cuda::std::complex; +using ComplexFloat = cuda::std::complex; +using ComplexDouble = cuda::std::complex; +} +#endif // BLAM_COMPLEX_TYPES + +// User could potentially define Half instead of cute:: +#ifndef BLAM_HALF_TYPE +#define BLAM_HALF_TYPE 1 +#include +namespace blam { +using Half = cute::half_t; +} +#endif // BLAM_HALF_TYPE + +namespace blam +{ +namespace cublas +{ + +inline const char* +cublas_get_error(cublasStatus_t status) +{ + switch (status) { + case CUBLAS_STATUS_SUCCESS: + return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: + return "CUBLAS_STATUS_NOT_INITIALIZED -- The cuBLAS library was not initialized."; + case CUBLAS_STATUS_ALLOC_FAILED: + return "CUBLAS_STATUS_ALLOC_FAILED -- Resource allocation failed inside the cuBLAS library."; + case CUBLAS_STATUS_INVALID_VALUE: + return "CUBLAS_STATUS_INVALID_VALUE -- An unsupported value or parameter was passed to the function."; + case CUBLAS_STATUS_ARCH_MISMATCH: + return "CUBLAS_STATUS_ARCH_MISMATCH -- The function requires a feature absent from the device architecture."; + case CUBLAS_STATUS_MAPPING_ERROR: + return "CUBLAS_STATUS_MAPPING_ERROR -- An access to GPU memory space failed."; + case CUBLAS_STATUS_EXECUTION_FAILED: 
+ return "CUBLAS_STATUS_EXECUTION_FAILED -- The GPU program failed to execute."; + case CUBLAS_STATUS_INTERNAL_ERROR: + return "CUBLAS_STATUS_INTERNAL_ERROR -- An internal cuBLAS operation failed."; + case CUBLAS_STATUS_NOT_SUPPORTED: + return "CUBLAS_STATUS_NOT_SUPPORTED -- The functionality requested is not supported."; + case CUBLAS_STATUS_LICENSE_ERROR: + return "CUBLAS_STATUS_LICENSE_ERROR -- An error was detected when checking the current licensing."; + default: + return "CUBLAS_ERROR -- "; + } +} + +inline bool +cublas_is_error(cublasStatus_t status) +{ + return status != CUBLAS_STATUS_SUCCESS; +} + + +// hgemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const Half* alpha, + const Half* A, int ldA, + const Half* B, int ldB, + const Half* beta, + Half* C, int ldC) +{ + BLAM_DEBUG_OUT("cublasHgemm"); + + return cublasGemmEx(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), CUDA_R_16F, ldA, + reinterpret_cast(B), CUDA_R_16F, ldB, + reinterpret_cast(beta), + reinterpret_cast< __half*>(C), CUDA_R_16F, ldC, + CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +// mixed hf gemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const float* alpha, + const Half* A, int ldA, + const Half* B, int ldB, + const float* beta, + float* C, int ldC) +{ + BLAM_DEBUG_OUT("cublasGemmEx mixed half-float"); + + return cublasGemmEx(handle, transA, transB, + m, n, k, + alpha, + reinterpret_cast(A), CUDA_R_16F, ldA, + reinterpret_cast(B), CUDA_R_16F, ldB, + beta, + C, CUDA_R_32F, ldC, + CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +// igemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const int32_t* alpha, + const int8_t* A, int ldA, + const int8_t* B, int ldB, + const int32_t* beta, + int32_t* C, 
int ldC) +{ + BLAM_DEBUG_OUT("cublasIgemm"); + + return cublasGemmEx(handle, transA, transB, + m, n, k, + alpha, + A, CUDA_R_8I, ldA, + B, CUDA_R_8I, ldB, + beta, + C, CUDA_R_32I, ldC, + CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +// sgemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const float* alpha, + const float* A, int ldA, + const float* B, int ldB, + const float* beta, + float* C, int ldC) +{ + BLAM_DEBUG_OUT("cublasSgemm"); + + return cublasSgemm(handle, transA, transB, + m, n, k, + alpha, + A, ldA, + B, ldB, + beta, + C, ldC); +} + +// dgemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const double* alpha, + const double* A, int ldA, + const double* B, int ldB, + const double* beta, + double* C, int ldC) +{ + BLAM_DEBUG_OUT("cublasDgemm"); + + return cublasDgemm(handle, transA, transB, + m, n, k, + alpha, + A, ldA, + B, ldB, + beta, + C, ldC); +} + +// cgemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const ComplexFloat* alpha, + const ComplexFloat* A, int ldA, + const ComplexFloat* B, int ldB, + const ComplexFloat* beta, + ComplexFloat* C, int ldC) +{ + BLAM_DEBUG_OUT("cublasCgemm"); + + return cublasCgemm(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), ldA, + reinterpret_cast(B), ldB, + reinterpret_cast(beta), + reinterpret_cast(C), ldC); +} + +// zgemm +inline cublasStatus_t +gemm(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const ComplexDouble* alpha, + const ComplexDouble* A, int ldA, + const ComplexDouble* B, int ldB, + const ComplexDouble* beta, + ComplexDouble* C, int ldC) +{ + BLAM_DEBUG_OUT("cublasZgemm"); + + return cublasZgemm(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + 
reinterpret_cast(A), ldA, + reinterpret_cast(B), ldB, + reinterpret_cast(beta), + reinterpret_cast(C), ldC); +} + +// hgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const Half* alpha, + const Half* A, int ldA, int loA, + const Half* B, int ldB, int loB, + const Half* beta, + Half* C, int ldC, int loC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasHgemmStridedBatched"); + + return cublasHgemmStridedBatched(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), ldA, loA, + reinterpret_cast(B), ldB, loB, + reinterpret_cast(beta), + reinterpret_cast<__half*>(C), ldC, loC, + batch_size); +} + +// sgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const float* alpha, + const float* A, int ldA, int loA, + const float* B, int ldB, int loB, + const float* beta, + float* C, int ldC, int loC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasSgemmStridedBatched"); + + return cublasSgemmStridedBatched(handle, transA, transB, + m, n, k, + alpha, + A, ldA, loA, + B, ldB, loB, + beta, + C, ldC, loC, + batch_size); +} + +// dgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const double* alpha, + const double* A, int ldA, int loA, + const double* B, int ldB, int loB, + const double* beta, + double* C, int ldC, int loC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasDgemmStridedBatched"); + + return cublasDgemmStridedBatched(handle, transA, transB, + m, n, k, + alpha, + A, ldA, loA, + B, ldB, loB, + beta, + C, ldC, loC, + batch_size); +} + +// cgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const ComplexFloat* alpha, + const ComplexFloat* A, int ldA, int loA, + const ComplexFloat* B, int ldB, 
int loB, + const ComplexFloat* beta, + ComplexFloat* C, int ldC, int loC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasCgemmStridedBatched"); + + return cublasCgemmStridedBatched(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), ldA, loA, + reinterpret_cast(B), ldB, loB, + reinterpret_cast(beta), + reinterpret_cast(C), ldC, loC, + batch_size); +} + +// zgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const ComplexDouble* alpha, + const ComplexDouble* A, int ldA, int loA, + const ComplexDouble* B, int ldB, int loB, + const ComplexDouble* beta, + ComplexDouble* C, int ldC, int loC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasZgemmStridedBatched"); + + return cublasZgemmStridedBatched(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), ldA, loA, + reinterpret_cast(B), ldB, loB, + reinterpret_cast(beta), + reinterpret_cast(C), ldC, loC, + batch_size); +} + +// hgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const Half* alpha, + const Half* const A[], int ldA, + const Half* const B[], int ldB, + const Half* beta, + Half* const C[], int ldC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasHgemmBatched"); + + return cublasHgemmBatched(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(const_cast(A)), ldA, + // A, ldA, // cuBLAS 9.2 + reinterpret_cast(const_cast(B)), ldB, + // B, ldB, // cuBLAS 9.2 + reinterpret_cast(beta), + reinterpret_cast<__half**>(const_cast(C)), ldC, + // C, ldC, // cuBLAS 9.2 + batch_size); +} + +// sgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const float* alpha, + const float* const A[], int ldA, + const float* const B[], int ldB, + const float* beta, + float* const C[], 
int ldC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasSgemmBatched"); + + return cublasSgemmBatched(handle, transA, transB, + m, n, k, + alpha, + const_cast(A), ldA, + // A, ldA, // cuBLAS 9.2 + const_cast(B), ldB, + // B, ldB, // cuBLAS 9.2 + beta, + const_cast(C), ldC, + // C, ldC, // cuBLAS 9.2 + batch_size); +} + +// dgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const double* alpha, + const double* const A[], int ldA, + const double* const B[], int ldB, + const double* beta, + double* const C[], int ldC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasDgemmBatched"); + + return cublasDgemmBatched(handle, transA, transB, + m, n, k, + alpha, + const_cast(A), ldA, + // A, ldA, // cuBLAS 9.2 + const_cast(B), ldB, + // B, ldB, // cuBLAS 9.2 + beta, + const_cast(C), ldC, + // C, ldC, // cuBLAS 9.2 + batch_size); +} + +// cgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const ComplexFloat* alpha, + const ComplexFloat* const A[], int ldA, + const ComplexFloat* const B[], int ldB, + const ComplexFloat* beta, + ComplexFloat* const C[], int ldC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasCgemmBatched"); + + return cublasCgemmBatched(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + const_cast(reinterpret_cast(A)), ldA, + //reinterpret_cast(A), ldA, // cuBLAS 9.2 + const_cast(reinterpret_cast(B)), ldB, + //reinterpret_cast(B), ldB, // cuBLAS 9.2 + reinterpret_cast(beta), + const_cast(reinterpret_cast(C)), ldC, + //reinterpret_cast(C), ldC, // cuBLAS 9.2 + batch_size); +} + +// zgemm +inline cublasStatus_t +gemm_batch(cublasHandle_t handle, + cublasOperation_t transA, cublasOperation_t transB, + int m, int n, int k, + const ComplexDouble* alpha, + const ComplexDouble* const A[], int ldA, + const ComplexDouble* const B[], int ldB, + const ComplexDouble* beta, + 
ComplexDouble* const C[], int ldC, + int batch_size) +{ + BLAM_DEBUG_OUT("cublasZgemmBatched"); + + return cublasZgemmBatched(handle, transA, transB, + m, n, k, + reinterpret_cast(alpha), + const_cast(reinterpret_cast(A)), ldA, + //reinterpret_cast(A), ldA, // cuBLAS 9.2 + const_cast(reinterpret_cast(B)), ldB, + //reinterpret_cast(B), ldB, // cuBLAS 9.2 + reinterpret_cast(beta), + const_cast(reinterpret_cast(C)), ldC, + //reinterpret_cast(C), ldC, // cuBLAS 9.2 + batch_size); +} + +} // end namespace cublas +} // end namespace blam diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/debug.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..3a2480c8da275a75750272256a0095cb78dcd7b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/debug.h @@ -0,0 +1,143 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Contains code for debugging cutlass code +*/ + +#pragma once + +#include "device_dump.h" + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/****************************************************************************** + * Debug and logging macros + ******************************************************************************/ + +/** + * Formats and prints the given message to stdout + */ +#if !defined(CUDA_LOG) +#if !defined(__CUDA_ARCH__) +#define CUDA_LOG(format, ...) printf(format, __VA_ARGS__) +#else +#define CUDA_LOG(format, ...) \ + printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \ + blockIdx.x, \ + blockIdx.y, \ + blockIdx.z, \ + threadIdx.x, \ + threadIdx.y, \ + threadIdx.z, \ + __VA_ARGS__); +#endif +#endif + +/** + * Formats and prints the given message to stdout only if DEBUG is defined + */ +#if !defined(CUDA_LOG_DEBUG) +#ifdef DEBUG +#define CUDA_LOG_DEBUG(format, ...) CUDA_LOG(format, __VA_ARGS__) +#else +#define CUDA_LOG_DEBUG(format, ...) 
+#endif +#endif + +/** + * \brief The corresponding error message is printed to \p stderr (or \p stdout in device code) + * along with the supplied source context. + * + * \return The CUDA error. + */ +__host__ CUTLASS_DEVICE cudaError_t cuda_perror_impl(cudaError_t error, + const char* expression, + const char* filename, + int line) { + (void)filename; + (void)line; + if (error) { +#if !defined(__CUDA_ARCH__) + fprintf( + stderr, "CUDA error %d [%s, %d] in expression '%s': %s\n", error, filename, line, expression, cudaGetErrorString(error)); + fflush(stderr); +#else + printf("CUDA error %d [%s, %d] in expression '%s'\n", error, filename, line, expression); +#endif + } + return error; +} + +/** + * \brief Perror macro + */ +#ifndef CUDA_PERROR +#define CUDA_PERROR(e) cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__) +#endif + +/** + * \brief Perror macro with exit + */ +#ifndef CUDA_PERROR_EXIT +#define CUDA_PERROR_EXIT(e) \ + do { if (cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)) { \ + exit(1); \ + } } while (0) +#endif + +/** + * \brief Perror macro only if DEBUG is defined + */ +#ifndef CUDA_PERROR_DEBUG +#ifdef DEBUG +#define CUDA_PERROR_DEBUG(e) CUDA_PERROR(e) +#else +#define CUDA_PERROR_DEBUG(e) (e) +#endif +#endif + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +// A small helper class to dump a type at compile time +// Usage:: DumpType::Class +template +struct DebugType {}; + +template +void DebugTypeFunc(T const& t) { + T::t; +} + +// A small helper class to dump a compile time constant at compile time +// Usage: DumpValue::kConstant +template +struct DebugValue {}; diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_dump.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_dump.h new file mode 100644 index 
0000000000000000000000000000000000000000..7a3270d73e6dbe4a05bac5ffdf13779b673fed36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_dump.h @@ -0,0 +1,187 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +#include +#include "cutlass/cutlass.h" + +/** + * \file + * \brief C++ interface to dump fragments and shared memory contents for + * debugging. + */ + +namespace cutlass { +namespace debug { + +/****************************************************************************** + * Dump the fragments + ******************************************************************************/ + +/// The first N threads dump the first M elements from their fragments with a +/// stride of S elements. If N is not specified, dump the data of all the +/// threads. If M is not specified, dump all the elements of the fragment. +template +CUTLASS_DEVICE void dump_fragment(Fragment const& frag, int N = 0, int M = 0, + int S = 1) { + int total_threads = blockDim.x * blockDim.y * blockDim.z; + int block_id = + blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; + int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + + (threadIdx.y * blockDim.x) + threadIdx.x; + + if (N < 0 || N > total_threads) { + if (thread_id == 0 && block_id == 0) + printf("Thread number N = %d should between [1, %d].\n", N, + total_threads); + + __syncthreads(); + + return; + } + + int total_elements = frag.size(); + + if (M < 0 || M > total_elements) { + if (thread_id == 0 && block_id == 0) + printf("Element number M = %d should between [1, %d].\n", M, + total_elements); + + __syncthreads(); + + return; + } + + if (N == 0) N = total_threads; + + if (M == 0) M = total_elements; + + if (S < 1 || S > M) { + if (thread_id == 0 && block_id == 0) + printf("Stride S = %d should between [1, %d].\n", S, M); + + __syncthreads(); + + return; + } + + if (thread_id == 0 && block_id == 0) + printf("\n*******************Dumping the fragments*******************\n\n"); + + CUTLASS_PRAGMA_NO_UNROLL + for (int tid = 0; tid < N; ++tid) { + if (tid == thread_id) { + printf("TB%d W%d T%d: 
", block_id, tid / 32, tid & 31); + CUTLASS_PRAGMA_NO_UNROLL + for (int i = 0; i < M; i += S) { + printf("%.0f ", float(typename Fragment::value_type(frag[i]))); + } + printf("\n"); + } + + __syncthreads(); + } + + if (thread_id == 0 && block_id == 0) + printf("\n***********************************************************\n\n"); + + __syncthreads(); + + return; +} + +/****************************************************************************** + * Dump the shared memory + ******************************************************************************/ + +#define SHMEM_ROW_SIZE 128 + +/// Dump the shared memory contents. ptr is the begin address, size specifies +/// the number of elements that need to be dumped, and S specifies the stride. +template +CUTLASS_DEVICE void dump_shmem(Element const* ptr, size_t size, int S = 1) { + int block_id = + blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; + int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + + (threadIdx.y * blockDim.x) + threadIdx.x; + + if (ptr == nullptr) { + if (thread_id == 0 && block_id == 0) printf("ptr is null.\n"); + + __syncthreads(); + return; + } + + if (size < 1) { + if (thread_id == 0 && block_id == 0) + printf("Element size is less than 1\n"); + + __syncthreads(); + + return; + } + + int row_elements = SHMEM_ROW_SIZE / sizeof(Element); + + if (S < 1 || S > row_elements) { + if (thread_id == 0 && block_id == 0) + printf("Stride S = %d should between [1, %d].\n", S, row_elements); + + __syncthreads(); + + return; + } + + __syncthreads(); + + if (thread_id == 0) + printf("\n********Dumping the shared memory of TB %d*******\n\n", block_id); + + if (thread_id == 0) { + for (int i = 0; i < size; i += row_elements) { + for (int j = 0; j < row_elements; j += S) { + printf("%.0f ", float(ptr[i + j])); + } + + printf("\n"); + } + } + + if (thread_id == 0) + printf("\n***********************************************************\n\n"); + + __syncthreads(); + + return; +} +} // 
namespace debug +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_groupnorm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_groupnorm.h new file mode 100644 index 0000000000000000000000000000000000000000..5b78aa64f74e03ec645e647b89f38eb006c740f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_groupnorm.h @@ -0,0 +1,402 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief cuda kernels to do group norm on a device memory tensor with NHWC layout. The tensor will be divided into [N, H, W, G, C'] and then we do normalization on [H, W, C']. + */ + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" +#include "device_utils.h" +#include + +namespace cutlass { + +/** \brief interface to do group norm on a device memory tensor with NHWC layout. + * \tparam T: data type + */ +template +void groupnorm(cutlass::Tensor4DCoord input_size, + const int num_groups, + const float eps, + TensorRef ref_output, + TensorRef ref_input, + TensorRef ref_gamma, + TensorRef ref_beta, + cudaStream_t stream); + +extern __shared__ char groupnorm_shm[]; + +// For small prod_dim1_to_last_dim/num_groups, to avoid multiple loads from global memory, +// we store the input in the shared memory. 
+// grid(num_groups, dim0) +// block(BLOCKSIZE) +// BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim/num_group +template +__global__ void groupnorm_twopass_store_locally(T* output, + const T* input, + const T* gamma, + const T* beta, + int num_groups, + int prod_dim1_to_last_dim, + int last_dim, + const float eps, + const int TVecs_PER_THREAD) +{ + const int bid = blockIdx.y; // index of batch + const int gid = blockIdx.x; // index of group + const int tid = threadIdx.x; // index of thread + const int bdimx = blockDim.x; + const int s_reduce_elements = prod_dim1_to_last_dim / num_groups; + const int v_reduce_elements = s_reduce_elements / T_PER_TVec; + const int s_group_stride = last_dim / num_groups; + const int v_group_stride = s_group_stride / T_PER_TVec; + const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec; + const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group; + TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group; + T* local_val = ((T*)groupnorm_shm) + TVecs_PER_THREAD * T_PER_TVec * tid; + float local_sum[1] = {0.0f}; + +// load from global memory into shared memory +#pragma unroll + for (int i = 0; i < TVecs_PER_THREAD; i += 1) { + const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; + const int offset_in_group = + ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) + / T_PER_TVec; + if (current_load_start_idx < s_reduce_elements) { + TVec tmp_vec = input_TVec_ptr[offset_in_group]; + T* tmp_vec_ptr = (T*)(&tmp_vec); + const int local_val_offset = i * T_PER_TVec; +#pragma unroll + for (int j = 0; j < T_PER_TVec; j++) { + float tmp = static_cast(tmp_vec_ptr[j]); + local_sum[0] += tmp; + local_val[local_val_offset + j] = tmp_vec_ptr[j]; + } + } + } + __shared__ float s_mean, s_variance; + + // reduction for mean + if (bdimx <= 32) { + warpReduceSum(local_sum); + } + else { + blockReduceSum(local_sum); + } + if (tid == 0) { + s_mean 
= local_sum[0] / s_reduce_elements; + } + __syncthreads(); + + // reduction for std + local_sum[0] = 0.0f; +#pragma unroll + for (int i = 0; i < TVecs_PER_THREAD; i += 1) { + const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; + if (current_load_start_idx < s_reduce_elements) { + const int local_val_offset = i * T_PER_TVec; +#pragma unroll + for (int j = 0; j < T_PER_TVec; j++) { + float tmp = static_cast(local_val[local_val_offset + j]); + tmp -= s_mean; + local_sum[0] += tmp * tmp; + } + } + } + if (bdimx <= 32) { + warpReduceSum(local_sum); + } + else { + blockReduceSum(local_sum); + } + if (tid == 0) { + s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps); + } + __syncthreads(); + + // normalize + const int gamma_offset_of_group = gid * v_group_stride; + const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group; + const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group; +#pragma unroll + for (int i = 0; i < TVecs_PER_THREAD; i += 1) { + const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; + const int offset_in_group = + ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) + / T_PER_TVec; + const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec; + const int local_val_offset = i * T_PER_TVec; + if (current_load_start_idx < s_reduce_elements) { + TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group]; + TVec beta_val = beta_TVec_ptr[gamma_offset_in_group]; + T* gamma_val_ptr = (T*)(&gamma_val); + T* beta_val_ptr = (T*)(&beta_val); + TVec tmp_vec; + T* tmp_vec_ptr = (T*)(&tmp_vec); +#pragma unroll + for (int j = 0; j < T_PER_TVec; j++) { + float tmp = (static_cast(local_val[local_val_offset + j]) - s_mean) * s_variance + * static_cast(gamma_val_ptr[j]) + + static_cast(beta_val_ptr[j]); + if (sizeof(T) == sizeof(half)) { + tmp_vec_ptr[j] = T(__float2half_rn(tmp)); + } + else { + tmp_vec_ptr[j] = T(tmp); + } + } + 
output_TVec_ptr[offset_in_group] = tmp_vec; + } + } +} + +// For large prod_dim1_to_last_dim/num_groups, +// in which the data cannot be stored locally, +// we will load from global memory multiple times, +// grid(num_groups, dim0) +// block(BLOCKSIZE) +// BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim/num_group +template +__global__ void groupnorm_twopass_multiple_load(T* output, + const T* input, + const T* gamma, + const T* beta, + int num_groups, + int prod_dim1_to_last_dim, + int last_dim, + const float eps, + const int TVecs_PER_THREAD) +{ + const int bid = blockIdx.y; // index of batch + const int gid = blockIdx.x; // index of group + const int tid = threadIdx.x; // index of thread + const int bdimx = blockDim.x; + const int s_reduce_elements = prod_dim1_to_last_dim / num_groups; + const int v_reduce_elements = s_reduce_elements / T_PER_TVec; + const int s_group_stride = last_dim / num_groups; + const int v_group_stride = s_group_stride / T_PER_TVec; + const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec; + const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group; + TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group; + float local_sum[1] = {0.0f}; + +#pragma unroll + for (int i = 0; i < TVecs_PER_THREAD; i += 1) { + const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; + if (current_load_start_idx < s_reduce_elements) { + const int offset_in_group = + ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) + / T_PER_TVec; + TVec tmp_vec = input_TVec_ptr[offset_in_group]; + T* tmp_vec_ptr = (T*)(&tmp_vec); +#pragma unroll + for (int j = 0; j < T_PER_TVec; j++) { + float tmp = static_cast(tmp_vec_ptr[j]); + local_sum[0] += tmp; + } + } + } + __shared__ float s_mean, s_variance; + + // reduction for mean + if (bdimx <= 32) { + warpReduceSum(local_sum); + } + else { + blockReduceSum(local_sum); + } + if (tid == 0) { + s_mean = local_sum[0] / 
s_reduce_elements; + } + __syncthreads(); + + // reduction for std + local_sum[0] = 0.0f; +#pragma unroll + for (int i = 0; i < TVecs_PER_THREAD; i += 1) { + const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; + if (current_load_start_idx < s_reduce_elements) { + const int offset_in_group = + ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) + / T_PER_TVec; + TVec tmp_vec = input_TVec_ptr[offset_in_group]; + T* tmp_vec_ptr = (T*)(&tmp_vec); +#pragma unroll + for (int j = 0; j < T_PER_TVec; j++) { + float tmp = static_cast(tmp_vec_ptr[j]); + tmp -= s_mean; + local_sum[0] += tmp * tmp; + } + } + } + if (bdimx <= 32) { + warpReduceSum(local_sum); + } + else { + blockReduceSum(local_sum); + } + if (tid == 0) { + s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps); + } + __syncthreads(); + + // normalize + const int gamma_offset_of_group = gid * v_group_stride; + const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group; + const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group; +#pragma unroll + for (int i = 0; i < TVecs_PER_THREAD; i += 1) { + const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; + if (current_load_start_idx < s_reduce_elements) { + const int offset_in_group = + ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) + / T_PER_TVec; + const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec; + TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group]; + TVec beta_val = beta_TVec_ptr[gamma_offset_in_group]; + T* gamma_val_ptr = (T*)(&gamma_val); + T* beta_val_ptr = (T*)(&beta_val); + TVec tmp_vec = input_TVec_ptr[offset_in_group]; + T* tmp_vec_ptr = (T*)(&tmp_vec); + TVec output_tmp_vec; + T* output_tmp_vec_ptr = (T*)(&output_tmp_vec); +#pragma unroll + for (int j = 0; j < T_PER_TVec; j++) { + float tmp = + (static_cast(tmp_vec_ptr[j]) - s_mean) * s_variance * 
static_cast(gamma_val_ptr[j]) + + static_cast(beta_val_ptr[j]); + if (sizeof(T) == sizeof(half)) { + output_tmp_vec_ptr[j] = T(__float2half_rn(tmp)); + } + else { + output_tmp_vec_ptr[j] = T(tmp); + } + } + output_TVec_ptr[offset_in_group] = output_tmp_vec; + } + } +} + +//ref_input & ref_output should be [N, H, W, C] +//ref_gamma & ref_beta should be [1, 1, 1, C] +template +void groupnorm(cutlass::Tensor4DCoord input_size, + const int num_groups, + const float eps, + TensorRef ref_output, + TensorRef ref_input, + TensorRef ref_gamma, + TensorRef ref_beta, + cudaStream_t stream){ + const int N = input_size.n(); + const int H = input_size.h(); + const int W = input_size.w(); + const int C = input_size.c(); + if (C % num_groups != 0){ + printf("[ERROR] C should be a multiple of num_groups.\n"); + } + T* output = ref_output.data(); + const T* input = ref_input.data(); + const T* gamma = ref_gamma.data(); + const T* beta = ref_beta.data(); + + const int dim0 = N; + const int last_dim = C; + const int prod_dim1_to_last_dim = H*W*C; + const int s_reduce_elements = prod_dim1_to_last_dim / num_groups; + const int s_group_stride = last_dim / num_groups; + dim3 grid(num_groups, dim0); + int threadblock_size = 32; + if (s_group_stride % 2 == 0) { + const int T_PER_TVec = 2; + while (threadblock_size < 1024) { + if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8) + break; + threadblock_size *= 2; + } + dim3 block(threadblock_size); + const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size; + const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T); + // for small s_reduce_elements, specific case for H=W=22, C=1280, num_groups=32; + // the size of grid & block may have better choice for different cases. 
+ // ensure shared memory is smaller than 48KB + if (std::is_same::value){ + if (shm_size < 48 * 1024) { + groupnorm_twopass_store_locally<<>>( + output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); + } + else { + groupnorm_twopass_multiple_load<<>>( + output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); + } + } + else{ + if (shm_size < 48 * 1024) { + groupnorm_twopass_store_locally<<>>( + output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); + } + else { + groupnorm_twopass_multiple_load<<>>( + output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); + } + } + } + else { + const int T_PER_TVec = 1; + while (threadblock_size < 1024) { + if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8) + break; + threadblock_size *= 2; + } + dim3 block(threadblock_size); + const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size; + const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T); + if (shm_size < 48 * 1024) { + groupnorm_twopass_store_locally<<>>( + output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); + } + else { + groupnorm_twopass_multiple_load<<>>( + output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); + } + } + +} + +} //namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_layernorm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_layernorm.h new file mode 100644 index 0000000000000000000000000000000000000000..c4ec9251bb3d9bf2581bb64622768a8091d7c265 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_layernorm.h @@ -0,0 +1,644 @@ 
+/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief cuda kernels to do layernorm on a device memory tensor with RowMajor layout. 
+ */ + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" +#include "device_utils.h" +#include + +namespace cutlass { + +/** \brief interface to do layernorm on a device memory tensor with RowMajor layout. + * \tparam T: data type + */ +template +void layernorm(cutlass::MatrixCoord tensor_size, + TensorRef ref_output, + TensorRef ref_input, + TensorRef ref_gamma, + TensorRef ref_beta, + cudaStream_t stream); + +/** + * output [m, n] row-major + * input [m, n] row-major + * gamma [n] + * beta [n] + * grid(m) + * block(block_size) -- each block deals with n elements ; each thread deals with ITEM_PER_THREAD elements +*/ +template +__global__ void layernorm_twoPassAlgo_stored_locally_e1(T* output, + const T* input, + const T* gamma, + const T* beta, + const int m, + const int n) +{ + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ float s_mean, s_variance; + T local_val[ITEM_PER_THREAD]; + float local_sums[1] = {0.0f}; + int offset = m_idx * n; + input += offset; + output += offset; + + const T zero = T(0.0f); + #pragma unroll + for (int i = 0 ; i < ITEM_PER_THREAD ; i++){ + int index = tid + i*bdimx; + local_val[i] = index < n ? 
input[index] : zero; + local_sums[0] += static_cast(local_val[i]); + } + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = local_sums[0] / n; + } + __syncthreads(); + + local_sums[0] = 0.0f; + #pragma unroll + for (int i = 0 ; i < ITEM_PER_THREAD ; i++){ + int index = tid + i*bdimx; + if (index < n){ + const float tmp = static_cast(local_val[i]) - s_mean; + local_sums[0] += tmp * tmp; + } + } + + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_variance = rsqrtf(local_sums[0] / n + 1e-5); + } + __syncthreads(); + + #pragma unroll + for (int i = 0 ; i < ITEM_PER_THREAD ; i++){ + int index = tid + i*bdimx; + if (index < n) { + const T gamma_val = gamma[index]; + const T beta_val = beta[index]; + output[index] = T((static_cast(local_val[i]) - s_mean) * s_variance * static_cast(gamma_val) + static_cast(beta_val)); + } + } +} + +/** + * output [m, n] row-major + * input [m, n] row-major + * gamma [n] + * beta [n] + * grid(m) + * block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*2 elements; +*/ +template +__global__ void layernorm_twoPassAlgo_stored_locally_e2(T2* output, + const T2* input, + const T2* gamma, + const T2* beta, + const int m, + const int n) +{ + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ float s_mean, s_variance; + float local_sums[1] = {0.0f}; + T2 local_val[ITEM_PER_THREAD]; + const int n_2 = n / 2; + int offset = m_idx * n_2; + input += offset; + output += offset; + + const T2 zero = {T(0.0f), T(0.0f)}; + #pragma UNROLL + for (int i = 0; i < ITEM_PER_THREAD; i += 1) { + const int index = i*bdimx + tid; + local_val[i] = index < n_2 ? 
input[index] : zero; + local_sums[0] += static_cast(local_val[i].x) + static_cast(local_val[i].y); + } + + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = local_sums[0] / n; + } + __syncthreads(); + + local_sums[0] = 0.0f; + #pragma UNROLL + for (int i = 0; i < ITEM_PER_THREAD; i += 1) { + const int index = i*bdimx + tid; + if (index < n_2){ + const float2 tmp = {static_cast(local_val[i].x) - s_mean, + static_cast(local_val[i].y) - s_mean}; + local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y; + } + } + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_variance = rsqrtf(local_sums[0] / n + 1e-5); + } + __syncthreads(); + + #pragma UNROLL + for (int i = 0; i < ITEM_PER_THREAD; i += 1) { + const int index = i*bdimx + tid; + if (index < n_2){ + const T2 gamma_val = gamma[index]; + const T2 beta_val = beta[index]; + T2 tmp; + tmp.x = T((static_cast(local_val[i].x) - s_mean)*s_variance*static_cast(gamma_val.x) + static_cast(beta_val.x)); + tmp.y = T((static_cast(local_val[i].y) - s_mean)*s_variance*static_cast(gamma_val.y) + static_cast(beta_val.y)); + output[index] = tmp; + } + } +} + +/** + * output [m, n] row-major + * input [m, n] row-major + * gamma [n] + * beta [n] + * grid(m) + * block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*4 elements; +*/ +template +__global__ void layernorm_twoPassAlgo_stored_locally_e4(T4* output, + const T4* input, + const T4* gamma, + const T4* beta, + const int m, + const int n) +{ + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ float s_mean, s_variance; + float local_sums[1] = {0.0f}; + T4 local_val[ITEM_PER_THREAD]; + const int n_4 = n / 4; + int offset = m_idx * n_4; + input += offset; + output += offset; + + const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)}; + #pragma UNROLL + for (int i 
= 0; i < ITEM_PER_THREAD; i += 1) { + const int index = i*bdimx + tid; + local_val[i] = index < n_4 ? input[index] : zero; + local_sums[0] += static_cast(local_val[i].x) + static_cast(local_val[i].y) + + static_cast(local_val[i].z) + static_cast(local_val[i].w); + } + + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = local_sums[0] / n; + } + __syncthreads(); + + local_sums[0] = 0.0f; + #pragma UNROLL + for (int i = 0; i < ITEM_PER_THREAD; i += 1) { + const int index = i*bdimx + tid; + if (index < n_4){ + const float4 tmp = {static_cast(local_val[i].x) - s_mean, + static_cast(local_val[i].y) - s_mean, + static_cast(local_val[i].z) - s_mean, + static_cast(local_val[i].w) - s_mean}; + local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y + tmp.z * tmp.z + tmp.w * tmp.w; + } + } + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_variance = rsqrtf(local_sums[0] / n + 1e-5); + } + __syncthreads(); + + #pragma UNROLL + for (int i = 0; i < ITEM_PER_THREAD; i += 1) { + const int index = i*bdimx + tid; + if (index < n_4){ + const T4 gamma_val = gamma[index]; + const T4 beta_val = beta[index]; + T4 tmp; + tmp.x = T((static_cast(local_val[i].x) - s_mean)*s_variance*static_cast(gamma_val.x) + static_cast(beta_val.x)); + tmp.y = T((static_cast(local_val[i].y) - s_mean)*s_variance*static_cast(gamma_val.y) + static_cast(beta_val.y)); + tmp.z = T((static_cast(local_val[i].z) - s_mean)*s_variance*static_cast(gamma_val.z) + static_cast(beta_val.z)); + tmp.w = T((static_cast(local_val[i].w) - s_mean)*s_variance*static_cast(gamma_val.w) + static_cast(beta_val.w)); + output[index] = tmp; + } + } +} + +/** + * output [m, n] row-major + * input [m, n] row-major + * gamma [n] + * beta [n] + * grid(m) + * block(block_size) -- each block deals with n elements ; each thread deals with ITEM_PER_THREAD elements +*/ +template 
+__global__ void layernorm_twoPassAlgo_e1(T* output, + const T* input, + const T* gamma, + const T* beta, + const int m, + const int n) +{ + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ float s_mean, s_variance; + float local_sums[1] = {0.0f}; + int offset = m_idx * n; + input += offset; + output += offset; + + for (int index = tid ; index < n ; index += bdimx){ + float local_val = static_cast(input[index]); + local_sums[0] += local_val; + } + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = local_sums[0] / n; + } + __syncthreads(); + + local_sums[0] = 0.0f; + for (int index = tid ; index < n ; index += bdimx){ + float local_val = static_cast(input[index]); + local_val = local_val - s_mean; + local_sums[0] += local_val * local_val; + } + + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_variance = rsqrtf(local_sums[0] / n + 1e-5); + } + __syncthreads(); + + for (int index = tid ; index < n ; index += bdimx){ + const T gamma_val = gamma[index]; + const T beta_val = beta[index]; + const T local_val = input[index]; + output[index] = T((static_cast(local_val) - s_mean) * s_variance * static_cast(gamma_val) + static_cast(beta_val)); + } +} + +/** + * output [m, n] row-major + * input [m, n] row-major + * gamma [n] + * beta [n] + * grid(m) + * block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*2 elements; +*/ +template +__global__ void layernorm_twoPassAlgo_e2(T2* output, + const T2* input, + const T2* gamma, + const T2* beta, + const int m, + const int n) +{ + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ float s_mean, s_variance; + float local_sums[1] = {0.0f}; + const int n_2 = n / 2; + int offset = m_idx * n_2; + input += offset; + output += offset; + + for (int 
index = tid; index < n_2; index += bdimx) { + const T2 local_val = input[index]; + local_sums[0] += static_cast(local_val.x) + static_cast(local_val.y); + } + + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = local_sums[0] / n; + } + __syncthreads(); + + local_sums[0] = 0.0f; + for (int index = tid; index < n_2; index += bdimx) { + const T2 local_val = input[index]; + const float2 tmp = {static_cast(local_val.x) - s_mean, + static_cast(local_val.y) - s_mean}; + local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y; + } + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_variance = rsqrtf(local_sums[0] / n + 1e-5); + } + __syncthreads(); + + for (int index = tid; index < n_2; index += bdimx) { + const T2 local_val = input[index]; + const T2 gamma_val = gamma[index]; + const T2 beta_val = beta[index]; + T2 tmp; + tmp.x = T((static_cast(local_val.x) - s_mean)*s_variance*static_cast(gamma_val.x) + static_cast(beta_val.x)); + tmp.y = T((static_cast(local_val.y) - s_mean)*s_variance*static_cast(gamma_val.y) + static_cast(beta_val.y)); + output[index] = tmp; + } +} + +template +void layernorm(cutlass::MatrixCoord tensor_size, + TensorRef ref_output, + TensorRef ref_input, + TensorRef ref_gamma, + TensorRef ref_beta, + cudaStream_t stream){ + const int m = tensor_size.row(); + const int n = tensor_size.column(); + T* output = ref_output.data(); + const T* input = ref_input.data(); + const T* gamma = ref_gamma.data(); + const T* beta = ref_beta.data(); + dim3 grid(m); + dim3 block((n + 31)/32*32); + if (block.x > 1024){ + block.x = 1024; + } + // TODO : There should be better configs for different cases, we only use several samples to show how to use here + // TODO : using registers to store values locally can reduce the loads from global memory and speedup the kernels. 
+ if ((n % 4 == 0) && (n >= 128) && (n <= 4096)) { + block.x = (n/4 + 31)/32*32; + if (std::is_same::value) { + layernorm_twoPassAlgo_stored_locally_e4<<>>( + (float4*)output, + (const float4*)input, + (const float4*)gamma, + (const float4*)beta, + m, + n); + } // if (std::is_same::value) + else { + layernorm_twoPassAlgo_stored_locally_e4<<>>( + (half4*)output, + (const half4*)input, + (const half4*)gamma, + (const half4*)beta, + m, + n); + } + } //if ((n % 4 == 0) && (n >= 128) && (n <= 4096)) + else if (n % 2 == 0) { + if (n / 2 <= 1024) { + block.x = (n/2 + 31)/32*32; + if (std::is_same::value) { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (float2*)output, + (const float2*)input, + (const float2*)gamma, + (const float2*)beta, + m, + n); + } //if (std::is_same::value) + else { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (half2*)output, + (const half2*)input, + (const half2*)gamma, + (const half2*)beta, + m, + n); + } + } // if (n / 2 <= 1024) + else if (n <= 8192) { + block.x = ((n + 7)/8 + 31)/32*32; + if (std::is_same::value) { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (float2*)output, + (const float2*)input, + (const float2*)gamma, + (const float2*)beta, + m, + n); + } // if (std::is_same::value) + else { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (half2*)output, + (const half2*)input, + (const half2*)gamma, + (const half2*)beta, + m, + n); + } + } // if (n <= 8192) + else if (n <= 16384) { + block.x = ((n + 15)/ 16 + 31)/32*32; + if (std::is_same::value) { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (float2*)output, + (const float2*)input, + (const float2*)gamma, + (const float2*)beta, + m, + n); + } // if (std::is_same::value) + else { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (half2*)output, + (const half2*)input, + (const half2*)gamma, + (const half2*)beta, + m, + n); + } + } // if (n <= 16384) + else if (n <= 32768) { + block.x = ((n + 31)/32 + 31)/32*32; + if (std::is_same::value) { + 
layernorm_twoPassAlgo_stored_locally_e2<<>>( + (float2*)output, + (const float2*)input, + (const float2*)gamma, + (const float2*)beta, + m, + n); + } // if (std::is_same::value) + else { + layernorm_twoPassAlgo_stored_locally_e2<<>>( + (half2*)output, + (const half2*)input, + (const half2*)gamma, + (const half2*)beta, + m, + n); + } + } // if (n <= 32768) + else { + if (block.x > 512) + block.x = 512; + if (std::is_same::value) { + layernorm_twoPassAlgo_e2<<>>( + (float2 *)output, + (const float2 *)input, + (const float2 *)gamma, + (const float2 *)beta, + m, + n); + } // if (std::is_same::value) + else { + layernorm_twoPassAlgo_e2<<>>( + (half2 *)output, + (const half2 *)input, + (const half2 *)gamma, + (const half2 *)beta, + m, + n); + } + } + } // if (n % 2 == 0) + else { + if (n <= 1024) { + layernorm_twoPassAlgo_stored_locally_e1<<>>( + output, + input, + gamma, + beta, + m, + n); + } // if (n <= 1024) + else if (n <= 8192) { + block.x = ((n + 7)/8 + 31)/32*32; + layernorm_twoPassAlgo_stored_locally_e1<<>>( + output, + input, + gamma, + beta, + m, + n); + } // if (n <= 8192) + else if (n <= 16384) { + block.x = ((n + 15)/16 + 32)/32*32; + layernorm_twoPassAlgo_stored_locally_e1<<>>( + output, + input, + gamma, + beta, + m, + n); + } // if (n <= 16384) + else if (n <= 32768) { + block.x = ((n + 31)/32 + 31)/32*32; + layernorm_twoPassAlgo_stored_locally_e1<<>>( + output, + input, + gamma, + beta, + m, + n); + } // if (n <= 32768) + else{ + if (block.x > 512) { + block.x = 512; + } + layernorm_twoPassAlgo_e1<<>>( + output, + input, + gamma, + beta, + m, + n); + } + } +} + +} //namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_memory.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_memory.h new file mode 100644 index 0000000000000000000000000000000000000000..67dfff5bdba149a8e13febbc21d16bc6ecc25796 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_memory.h @@ -0,0 +1,338 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief C++ interface to CUDA device memory management functions. + */ + +#include + +#include "cutlass/platform/platform.h" +#include "cutlass/numeric_types.h" +#include "exceptions.h" + +namespace cutlass { +namespace device_memory { + +/****************************************************************************** + * Allocation lifetime + ******************************************************************************/ + +/// Allocate a buffer of \p count elements of type \p T on the current CUDA device +template +T* allocate(size_t count = 1) { + + T* ptr = 0; + size_t bytes = 0; + + bytes = count * sizeof(T); + + cudaError_t cuda_error = cudaMalloc((void**)&ptr, bytes); + + if (cuda_error != cudaSuccess) { + throw cuda_exception("Failed to allocate memory", cuda_error); + } + + return ptr; +} + +/// Free the buffer pointed to by \p ptr +template +void free(T* ptr) { + if (ptr) { + cudaError_t cuda_error = (cudaFree(ptr)); + if (cuda_error != cudaSuccess) { + throw cuda_exception("Failed to free device memory", cuda_error); + } + } +} + +/****************************************************************************** + * Data movement + ******************************************************************************/ + +template +void copy(T* dst, T const* src, size_t count, cudaMemcpyKind kind) { + size_t bytes = count * sizeof_bits::value / 8; + if (bytes == 0 && count > 0) + bytes = 1; + cudaError_t cuda_error = (cudaMemcpy(dst, src, bytes, kind)); + if (cuda_error != cudaSuccess) { + throw cuda_exception("cudaMemcpy() failed", cuda_error); + } +} + +template +void copy_to_device(T* dst, T const* src, size_t count = 1) { + copy(dst, src, count, cudaMemcpyHostToDevice); +} + +template +void copy_to_host(T* dst, T const* src, size_t count = 1) { + copy(dst, src, count, cudaMemcpyDeviceToHost); +} + +template +void copy_device_to_device(T* dst, 
T const* src, size_t count = 1) { + copy(dst, src, count, cudaMemcpyDeviceToDevice); +} + +template +void copy_host_to_host(T* dst, T const* src, size_t count = 1) { + copy(dst, src, count, cudaMemcpyHostToHost); +} + +/// Copies elements from device memory to host-side range +template +void insert_to_host(OutputIterator begin, OutputIterator end, T const* device_begin) { + size_t elements = end - begin; + copy_to_host(&*begin, device_begin, elements); +} + +/// Copies elements to device memory from host-side range +template +void insert_to_device(T* device_begin, InputIterator begin, InputIterator end) { + size_t elements = end - begin; + copy_to_device(device_begin, &*begin, elements); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device_memory + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +class DeviceAllocation { +public: + + /// Delete functor for CUDA device memory + struct deleter { + void operator()(T* ptr) { + cudaError_t cuda_error = (cudaFree(ptr)); + if (cuda_error != cudaSuccess) { + // noexcept + // throw cuda_exception("cudaFree() failed", cuda_error); + return; + } + } + }; + +public: + // + // Data members + // + + /// Number of elements of T allocated on the current CUDA device + size_t capacity; + + /// Smart pointer + platform::unique_ptr smart_ptr; + +public: + + // + // Static methods + // + + /// Static member to compute the number of bytes needed for a given number of elements + static size_t bytes(size_t elements) { + if (sizeof_bits::value < 8) { + size_t const kElementsPerByte = 8 / sizeof_bits::value; + return elements / kElementsPerByte; + } + else { + size_t const kBytesPerElement = sizeof_bits::value / 8; + return elements * kBytesPerElement; + } + } + +public: + + // + // Methods + // + + /// Constructor: allocates no memory + DeviceAllocation() : capacity(0) {} + + /// Constructor: allocates 
\p capacity elements on the current CUDA device + DeviceAllocation(size_t _capacity) : + smart_ptr(device_memory::allocate(_capacity)), capacity(_capacity) {} + + /// Constructor: allocates \p capacity elements on the current CUDA device taking ownership of the allocation + DeviceAllocation(T *ptr, size_t _capacity) : smart_ptr(ptr), capacity(_capacity) {} + + /// Copy constructor + DeviceAllocation(DeviceAllocation const &p): + smart_ptr(device_memory::allocate(p.capacity)), capacity(p.capacity) { + + device_memory::copy_device_to_device(smart_ptr.get(), p.get(), capacity); + } + + /// Move constructor + DeviceAllocation(DeviceAllocation &&p): capacity(0) { + std::swap(smart_ptr, p.smart_ptr); + std::swap(capacity, p.capacity); + } + + /// Destructor + ~DeviceAllocation() { reset(); } + + /// Returns a pointer to the managed object + T* get() const { return smart_ptr.get(); } + + /// Releases the ownership of the managed object (without deleting) and resets capacity to zero + T* release() { + capacity = 0; + return smart_ptr.release(); + } + + /// Deletes the managed object and resets capacity to zero + void reset() { + capacity = 0; + smart_ptr.reset(); + } + + /// Deletes managed object, if owned, and allocates a new object + void reset(size_t _capacity) { + reset(device_memory::allocate(_capacity), _capacity); + } + + /// Deletes managed object, if owned, and replaces its reference with a given pointer and capacity + void reset(T* _ptr, size_t _capacity) { + smart_ptr.reset(_ptr); + capacity = _capacity; + } + + /// Allocates a new buffer and copies the old buffer into it. The old buffer is then released. 
+ void reallocate(size_t new_capacity) { + + platform::unique_ptr new_allocation(device_memory::allocate(new_capacity)); + + device_memory::copy_device_to_device( + new_allocation.get(), + smart_ptr.get(), + std::min(new_capacity, capacity)); + + std::swap(smart_ptr, new_allocation); + std::swap(new_capacity, capacity); + } + + /// Returns the number of elements + size_t size() const { + return capacity; + } + + /// Returns the number of bytes needed to store the allocation + size_t bytes() const { + return bytes(capacity); + } + + /// Returns a pointer to the object owned by *this + T* operator->() const { return smart_ptr.get(); } + + /// Returns the deleter object which would be used for destruction of the managed object. + deleter& get_deleter() { return smart_ptr.get_deleter(); } + + /// Returns the deleter object which would be used for destruction of the managed object (const) + const deleter& get_deleter() const { return smart_ptr.get_deleter(); } + + /// Copies a device-side memory allocation + DeviceAllocation & operator=(DeviceAllocation const &p) { + if (capacity != p.capacity) { + smart_ptr.reset(device_memory::allocate(p.capacity)); + capacity = p.capacity; + } + device_memory::copy_device_to_device(smart_ptr.get(), p.get(), capacity); + return *this; + } + + /// Move assignment + DeviceAllocation & operator=(DeviceAllocation && p) { + std::swap(smart_ptr, p.smart_ptr); + std::swap(capacity, p.capacity); + return *this; + } + + /// Copies the entire allocation from another location in device memory. 
+ void copy_from_device(T const *ptr) const { + copy_from_device(ptr, capacity); + } + + /// Copies a given number of elements from device memory + void copy_from_device(T const *ptr, size_t elements) const { + device_memory::copy_device_to_device(get(), ptr, elements); + } + + void copy_to_device(T *ptr) const { + copy_to_device(ptr, capacity); + } + + void copy_to_device(T *ptr, size_t elements) const { + device_memory::copy_device_to_device(ptr, get(), elements); + } + + void copy_from_host(T const *ptr) const { + copy_from_host(ptr, capacity); + } + + void copy_from_host(T const *ptr, size_t elements) const { + device_memory::copy_to_device(get(), ptr, elements); + } + + void copy_to_host(T *ptr) const { + copy_to_host(ptr, capacity); + } + + void copy_to_host(T *ptr, size_t elements) const { + device_memory::copy_to_host(ptr, get(), elements); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace device_memory { + +/// Device allocation abstraction that tracks size and capacity +template +using allocation = cutlass::DeviceAllocation; + +} // namespace device_memory + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nchw_to_nhwc.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nchw_to_nhwc.h new file mode 100644 index 0000000000000000000000000000000000000000..8628c7a28ddb9179d3363bc1260ec2a9b957ab11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nchw_to_nhwc.h @@ -0,0 +1,141 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 
NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief cuda kernels to transform a device memory tensor from NCHW layout to NHWC layout. 
+ */ + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" + +namespace cutlass { + +/** \brief interface to transform a device memory tensor from NCHW layout to NHWC layout. + * \tparam T: data type + */ +template +void nchw_to_nhwc(cutlass::Tensor4DCoord input_tensor_size, + cutlass::Tensor4DCoord output_tensor_size, + TensorRef ref_input, + TensorRef ref_output, + cudaStream_t stream); + +template +__global__ void nchw_to_nhwc_kernel(T *output, + const T *input, + const int n, + const int h, + const int w, + const int c) { + const int hw = h*w; + const int chw = c*hw; + __shared__ T shbuf[32 * (32 + 1)]; + const int32_t tid = threadIdx.y*blockDim.x + threadIdx.x; + const int32_t wid = tid / 32; + const int32_t lid = tid % 32; + const int32_t ni = blockIdx.z; + const int32_t ci0 = blockIdx.y * 32; + const int32_t hwi0 = blockIdx.x * 32; + + const size_t input_idx = ni * chw + (ci0 + wid) * hw + hwi0; + const T *A = input + input_idx; + if (hwi0 + lid < hw) { + const int lid_x_33 = lid * 33; + if ((ci0 + 32) <= c) { + int ci = wid; // between 0 and 7 + CUTLASS_PRAGMA_UNROLL + for (int cLoopIdx = 0; cLoopIdx < 4; cLoopIdx++) { + shbuf[lid_x_33 + ci] = A[lid]; + A = &A[8 * hw]; + ci += 8; + } + } else { + for (int ci = wid; ci < 32; ci += 8) { + if ((ci + ci0) < c) { + shbuf[lid_x_33 + ci] = A[lid]; + } + A = &A[8 * hw]; + } + } + } + __syncthreads(); + + const int32_t ciOut = ci0 + lid; + output = &output[ni * chw + ciOut]; + if (ciOut < c) { + if (hwi0 + 32 < hw) { + int hwI = wid; + CUTLASS_PRAGMA_UNROLL + for (int hwLoopIdx = 0; hwLoopIdx < 4; ++hwLoopIdx) { + output[(hwi0 + hwI) * c] = shbuf[(hwI)*33 + lid]; + hwI += 8; + } + } else { + for (int hwI = wid; hwI < 32; hwI += 8) { + if (hwi0 + hwI < hw) { + output[(hwi0 + hwI) * c] = shbuf[(hwI)*33 + lid]; + } + } + } + } +} + +template +void nchw_to_nhwc(cutlass::Tensor4DCoord input_tensor_size, + 
cutlass::Tensor4DCoord output_tensor_size, + TensorRef ref_input, + TensorRef ref_output, + cudaStream_t stream) { + + assert( + input_tensor_size.n() == output_tensor_size.n() && + input_tensor_size.c() == output_tensor_size.h() && + input_tensor_size.h() == output_tensor_size.w() && + input_tensor_size.w() == output_tensor_size.c()); + + int n = output_tensor_size.n(); + int h = output_tensor_size.h(); + int w = output_tensor_size.w(); + int c = output_tensor_size.c(); + + dim3 grid((h*w + 31)/32, (c + 31)/32, n); + dim3 block(32, 8); + nchw_to_nhwc_kernel<<>>(ref_output.data(), ref_input.data(), + n, h, w, c); +} + +} //namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_padding.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_padding.h new file mode 100644 index 0000000000000000000000000000000000000000..c489d7d1da518cdf5817939c254aab3321caffd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_padding.h @@ -0,0 +1,276 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief cuda kernels for padding in device memory with NHWC layout. 
+ */ + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" + +namespace cutlass { + +/** \brief interface for padding in a device memory tensor with NHWC layout + * \tparam T: data type + */ +template +void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size, + cutlass::Tensor4DCoord output_tensor_size, + TensorRef ref_input, + TensorRef ref_output, + cudaStream_t stream); + + +template +__global__ void nhwc_padding_kernel(const int32_t n, + const int32_t h, + const int32_t w, + const int32_t c_in, + const int32_t c_out, + const T zero, + const T *input, + T *output){ + + const int32_t idx_jump = blockDim.x * gridDim.x; + const int32_t total_elements = n * h * w * c_out; + + int32_t c_idx, w_idx, h_idx, n_idx, resudial; + + T value; + for (int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total_elements; idx += idx_jump) { + + c_idx = idx%c_out; + if (c_idx >= c_in){ + value = zero; + } + else{ + resudial = idx/c_out; + w_idx = resudial%w; + resudial = resudial/w; + h_idx = resudial%h; + n_idx = resudial/h; + resudial = ((n_idx * h + h_idx) * w + w_idx) * c_in + c_idx; + value = input[resudial]; + } + output[idx] = value; + } +} + + +// fast kernel for c_in = 3 & c_out = 4 +template +__global__ void nhwc_padding_channel_3To4_kernel(const int32_t n, + const int32_t h, + const int32_t w, + const Tio *input, + Tio *output, + const int32_t max_output_element, + const int32_t max_input_element, + const Tio zero_io, + const Telement zero_element){ + __shared__ Tio shm[192]; + const int tidx = blockIdx.x * 192 + threadIdx.x; + const int threadidx = threadIdx.x; + + shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx]; + __syncthreads(); + + const int output_offset = blockIdx.x * 256; + const int lower_bound = max_output_element < output_offset + 256 ? 
max_output_element : output_offset + 256; + for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192) + { + const Telement* shm_element = (const Telement*)shm + j*3*element_in_Tio/4; + Telement array[element_in_Tio]; + CUTLASS_PRAGMA_UNROLL + for (int k = 0 ; k < element_in_Tio ; k++) + array[k] = ((k+1)%4 == 0) ? zero_element : shm_element[(k > 3) ? (k - 1) : k]; + output[i] = *((const Tio *)array); + } +} + +// fast kernel for c_in = 3 & c_out = 8 +template +__global__ void nhwc_padding_channel_3To8_kernel(const int32_t n, + const int32_t h, + const int32_t w, + const Tio *input, + Tio *output, + const int32_t max_output_element, + const int32_t max_input_element, + const Tio zero_io, + const Telement zero_element){ + __shared__ Tio shm[192]; + const int tidx = blockIdx.x * 192 + threadIdx.x; + const int threadidx = threadIdx.x; + + shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx]; + __syncthreads(); + + const int output_offset = blockIdx.x * 512; + const int lower_bound = max_output_element < output_offset + 512 ? max_output_element : output_offset + 512; + for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192) + { + const Telement* shm_element = (const Telement*)shm + (element_in_Tio == 4 ? j/2 : j)*3; + Telement array[element_in_Tio]; + //float + if (element_in_Tio == 4){ + CUTLASS_PRAGMA_UNROLL + for (int k = 0 ; k < element_in_Tio ; k++) + array[k] = ((j % 2) == 1) ? zero_element : ((k >= 3) ? zero_element : shm_element[k]); + } + //half + else{ + CUTLASS_PRAGMA_UNROLL + for (int k = 0 ; k < element_in_Tio ; k++) + array[k] = (k >= 3) ? 
zero_element : shm_element[k]; + } + output[i] = *((const Tio *)array); + } +} + +template +void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size, + cutlass::Tensor4DCoord output_tensor_size, + TensorRef ref_input, + TensorRef ref_output, + cudaStream_t stream){ + assert( + input_tensor_size.n() == output_tensor_size.n() && + input_tensor_size.h() == output_tensor_size.h() && + input_tensor_size.w() == output_tensor_size.w() && + input_tensor_size.c() <= output_tensor_size.c()); + + int n = input_tensor_size.n(); + int h = input_tensor_size.h(); + int w = input_tensor_size.w(); + int c_in = input_tensor_size.c(); + int c_out = output_tensor_size.c(); + + //case 1 : channel == 3 padding to 4 or 8 + if ((c_out == 4 || c_out == 8) && c_in == 3 && (n*h*w % 8 == 0)){ + dim3 block(192); + const int nhw = n*h*w; + const int nhwc = nhw*c_in; + //for half_t + if (cutlass::sizeof_bits::value == 16){ + const int element_in_Tio = 8; + const int max_input_element = nhwc/element_in_Tio; + const int max_output_element = nhw*c_out/element_in_Tio; + const int4 zero_io = {0, 0, 0, 0}; + const half_t zero_element = static_cast(0.0f); + dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio)); + if (c_out == 4){ + nhwc_padding_channel_3To4_kernel<<>> + (n, h, w, + (const int4 *)ref_input.data(), + (int4 *)ref_output.data(), + max_output_element, + max_input_element, + zero_io, + zero_element); + } + else if (c_out == 8){ + nhwc_padding_channel_3To8_kernel<<>> + (n, h, w, + (const int4 *)ref_input.data(), + (int4 *)ref_output.data(), + max_output_element, + max_input_element, + zero_io, + zero_element); + } + } + //for float + else{ + const int element_in_Tio = 4; + const int max_input_element = nhwc/element_in_Tio; + const int max_output_element = nhw*c_out/element_in_Tio; + const float4 zero_io = {0.0f, 0.0f, 0.0f, 0.0f}; + const float zero_element = 0.0f; + dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio)); + if (c_out == 4){ + 
nhwc_padding_channel_3To4_kernel<<>> + (n, h, w, + (const float4 *)ref_input.data(), + (float4 *)ref_output.data(), + max_output_element, + max_input_element, + zero_io, + zero_element); + } + else if (c_out == 8){ + nhwc_padding_channel_3To8_kernel<<>> + (n, h, w, + (const float4 *)ref_input.data(), + (float4 *)ref_output.data(), + max_output_element, + max_input_element, + zero_io, + zero_element); + } + } + } + //case 2 : even channel + else if ((c_out % 2) == 0 && (c_in % 2) == 0){ + int32_t total_elements = n * h * w * c_out / 2; + int block_size = 256; + dim3 grid((total_elements + 255)/256); + dim3 block(block_size); + //for half_t + if (cutlass::sizeof_bits::value == 16){ + const __half2 zero = {0.0f, 0.0f}; + nhwc_padding_kernel<<>>(n, h, w, c_in/2, c_out/2, zero, (const __half2*)ref_input.data(), (__half2*)ref_output.data()); + } + //for float + else{ + const float2 zero = {0.0f, 0.0f}; + nhwc_padding_kernel<<>>(n, h, w, c_in/2, c_out/2, zero, (const float2*)ref_input.data(), (float2*)ref_output.data()); + } + } + //case 3 : odd channel + else{ + int32_t total_elements = n * h * w * c_out; + int block_size = 256; + dim3 grid((total_elements + 255)/256); + dim3 block(block_size); + const T zero = static_cast(0.0f); + nhwc_padding_kernel<<>>(n, h, w, c_in, c_out, zero, ref_input.data(), ref_output.data()); + } +} + + +} //namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..6bdf866da8f6b7a13046b99b062b007a871d769a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h @@ -0,0 +1,576 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief cuda kernels to do avg/max pooling on a device memory tensor with NHWC layout. 
+ */ + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" +#include "device_utils.h" +#include + +namespace cutlass { + +/** \brief interface to do avg/max pooling on a device memory tensor with NHWC layout. + * \tparam T: data type + */ +template +void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size, + cutlass::Tensor4DCoord filter_tensor_size, + cutlass::Tensor4DCoord output_tensor_size, + cutlass::MatrixCoord padding, + cutlass::MatrixCoord stride, + TensorRef ref_input, + TensorRef ref_output, + int poolingType, //0 for avg pooling ; 1 for max pooling + cudaStream_t stream); + +/** get the output size of pooling + */ +inline int getOutputSize(int H_W, int padding, int kernel_size, int stride) +{ + return (H_W + 2 * padding - kernel_size) / stride + 1; +} + +/** + * input is [N, H, W, C] + * assume stride == kernel_size + * output_h = (H + 2*padding_H - kernel_H)/stride_H + * output_w = (W + 2*padding_W - kernel_W)/stride_W + * output is [N, output_h, output_w, C] + * grid(N, output_h, output_w) + * block(min(C, 256)) : + * each block deals with C elements of output when each thread deals with ((C + 255)/256 element of output) +*/ +template +__global__ void pooling_nhwc_element1_kernel(T* output, + const T* input, + const int N, + const int H, + const int W, + const int C, + const int output_H, + const int output_W, + const int kernel_H, + const int kernel_W, + const int stride_H, + const int stride_W, + const int padding_H, + const int padding_W) +{ + const int tid = threadIdx.x; + const int n_idx = blockIdx.x; + const int output_h_idx = blockIdx.y; + const int output_w_idx = blockIdx.z; + + int h_start_idx = output_h_idx * stride_H - padding_H; + int h_end_idx = h_start_idx + kernel_H; + h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx; + h_end_idx = h_end_idx > H ? 
H : h_end_idx; + + int w_start_idx = output_w_idx * stride_W - padding_W; + int w_end_idx = w_start_idx + kernel_W; + w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx; + w_end_idx = w_end_idx > W ? W : w_end_idx; + + input += n_idx * H * W * C; + output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C; + const int kernel_size2 = kernel_H * kernel_W; + for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) { + float pooling; + if (IS_AVG_POOLING){ + pooling = 0.0f; + } + else{ + pooling = -FLT_MAX; + } + for (int h = h_start_idx; h < h_end_idx; h++) { + for (int w = w_start_idx; w < w_end_idx; w++) { + const int idx = (h * W + w) * C; + const float tmp = static_cast(input[idx + c_idx]); + if (IS_AVG_POOLING){ + pooling = pooling + tmp; + } + else{ + pooling = pooling > tmp ? pooling : tmp; + } + } + } + + T output_val; + if (IS_AVG_POOLING){ + output_val = T(pooling/kernel_size2); + } + else{ + output_val = T(pooling); + } + output[c_idx] = output_val; + } +} + +template +__global__ void pooling_nhwc_element2_kernel(T2* output, + const T2* input, + const int N, + const int H, + const int W, + const int C, + const int output_H, + const int output_W, + const int kernel_H, + const int kernel_W, + const int stride_H, + const int stride_W, + const int padding_H, + const int padding_W) +{ + const int tid = threadIdx.x; + const int n_idx = blockIdx.x; + const int output_h_idx = blockIdx.y; + const int output_w_idx = blockIdx.z; + + int h_start_idx = output_h_idx * stride_H - padding_H; + int h_end_idx = h_start_idx + kernel_H; + h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx; + h_end_idx = h_end_idx > H ? H : h_end_idx; + + int w_start_idx = output_w_idx * stride_W - padding_W; + int w_end_idx = w_start_idx + kernel_W; + w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx; + w_end_idx = w_end_idx > W ? 
W : w_end_idx; + + input += n_idx * H * W * C; + output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C; + const int kernel_size2 = kernel_H * kernel_W; + for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) { + float2 pooling; + if (IS_AVG_POOLING) { + pooling = {0.0f, 0.0f}; + } + else { + pooling = {-FLT_MAX, -FLT_MAX}; + } + for (int h = h_start_idx; h < h_end_idx; h++) { + for (int w = w_start_idx; w < w_end_idx; w++) { + const int idx = (h * W + w) * C; + const T2 tmp = input[idx + c_idx]; + const float2 tmp_flt2 = {static_cast(tmp.x), static_cast(tmp.y)}; + if (IS_AVG_POOLING) { + pooling.x += tmp_flt2.x; + pooling.y += tmp_flt2.y; + } + else { + pooling.x = pooling.x > tmp_flt2.x ? pooling.x : tmp_flt2.x; + pooling.y = pooling.y > tmp_flt2.y ? pooling.y : tmp_flt2.y; + } + } + } + + T2 output_val; + if (IS_AVG_POOLING) { + output_val.x = T(pooling.x/kernel_size2); + output_val.y = T(pooling.y/kernel_size2); + } + else { + output_val.x = T(pooling.x); + output_val.y = T(pooling.y); + } + output[c_idx] = output_val; + } +} + +/** + * output [N, 1, 1, C] + * input [N, H, W, C] + * grid(C, N) + * block(block_size) -- each block deals with H*W/block_size elements; +*/ +template +__global__ void pooling_nxhTo1x1_element1_kernel( + T* output, const T* input, const int N, const int HW, const int C) +{ + const int c_idx = blockIdx.x; + const int n_idx = blockIdx.y; + float pooling[1]; + if (IS_AVG_POOLING) { + pooling[0] = 0.0f; + } + else { + pooling[0] = -FLT_MAX; + } + const size_t input_offset = n_idx * HW * C + c_idx; + input += input_offset; + const size_t output_offset = n_idx * C + c_idx; + output += output_offset; + int tid = threadIdx.x; + + for (int index = tid; index < HW; index += blockDim.x) { + float val = static_cast(input[index * C]); + if (IS_AVG_POOLING) { + pooling[0] += val; + } + else { + pooling[0] = pooling[0] > val ? 
pooling[0] : val; + } + } + if (blockDim.x <= 32) { + if (IS_AVG_POOLING) { + warpReduceSum(pooling); + } + else { + warpReduceMax(pooling); + } + } + else { + if (IS_AVG_POOLING) { + blockReduceSum(pooling); + } + else { + blockReduceMax(pooling); + } + } + __syncthreads(); + if (threadIdx.x == 0) { + T output_val; + if (IS_AVG_POOLING) { + output_val = T(pooling[0] / HW); + } + else { + output_val = T(pooling[0]); + } + output[0] = output_val; + } +} + + +/** + * output [N, 1, 1, C] + * input [N, H, W, C] + * grid(C/2, N) + * block(block_size) -- each thread deals with H*W/block_size * 2 elements; +*/ +template +__global__ void pooling_nxhTo1x1_element2_kernel( + T2* output, const T2* input, const int N, const int HW, const int C) +{ + const int c_idx = blockIdx.x; + const int n_idx = blockIdx.y; + float pooling[2]; + if (IS_AVG_POOLING) { + pooling[0] = pooling[1] = 0.0f; + } + else { + pooling[0] = pooling[1] = -FLT_MAX; + } + const int C_2 = C / 2; + const size_t input_offset = n_idx * HW * C_2 + c_idx; + input += input_offset; + const size_t output_offset = n_idx * C_2 + c_idx; + output += output_offset; + int tid = threadIdx.x; + + for (int index = tid; index < HW; index += blockDim.x) { + T2 val = input[index * C_2]; + float2 val_flt2 = {static_cast(val.x), static_cast(val.y)}; + if (IS_AVG_POOLING) { + pooling[0] += val_flt2.x; + pooling[1] += val_flt2.y; + } + else { + pooling[0] = pooling[0] > val_flt2.x ? pooling[0] : val_flt2.x; + pooling[1] = pooling[1] > val_flt2.y ? 
pooling[1] : val_flt2.y; + } + } + if (blockDim.x <= 32) { + if (IS_AVG_POOLING) { + warpReduceSum(pooling); + } + else { + warpReduceMax(pooling); + } + } + else { + if (IS_AVG_POOLING) { + blockReduceSum(pooling); + } + else { + blockReduceMax(pooling); + } + } + __syncthreads(); + if (threadIdx.x == 0) { + T2 output_val; + if (IS_AVG_POOLING) { + output_val.x = T(pooling[0] / HW); + output_val.y = T(pooling[1] / HW); + } + else { + output_val.x = T(pooling[0]); + output_val.y = T(pooling[1]); + } + output[0] = output_val; + } +} + +template +void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size, + cutlass::Tensor4DCoord filter_tensor_size, + cutlass::Tensor4DCoord output_tensor_size, + cutlass::Tensor4DCoord padding, + cutlass::MatrixCoord stride, + TensorRef ref_input, + TensorRef ref_output, + int poolingType, //0 for avg pooling ; 1 for max pooling + cudaStream_t stream) { + + assert(input_tensor_size.n() == output_tensor_size.n() && + input_tensor_size.c() == output_tensor_size.c()); + + assert(filter_tensor_size.h() == stride.row() && + filter_tensor_size.w() == stride.column()); + + const int N = input_tensor_size.n(); + const int H = input_tensor_size.h(); + const int W = input_tensor_size.w(); + const int C = input_tensor_size.c(); + const int padding_H = padding.h(); + const int padding_W = padding.w(); + const int kernel_H = filter_tensor_size.h(); + const int kernel_W = filter_tensor_size.w(); + const int stride_H = stride.row(); + const int stride_W = stride.column(); + + const int output_H = getOutputSize(H, padding_H, kernel_H, stride_H); + const int output_W = getOutputSize(W, padding_W, kernel_W, stride_W); + + assert(output_tensor_size.h() == output_H && + output_tensor_size.w() == output_W); + + if (C % 2 != 0) { + if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) { + dim3 grid(C, N); + dim3 block(256); + if (H*W < block.x){ + block.x = (H*W + 31)/32*32; + } + if (poolingType == 0) { + 
pooling_nxhTo1x1_element1_kernel<<>>( + ref_output.data(), + ref_input.data(), + N, + H*W, + C); + } // if (poolingType == 0) + else { + pooling_nxhTo1x1_element1_kernel<<>>( + ref_output.data(), + ref_input.data(), + N, + H*W, + C); + } + } // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) + else { + dim3 grid(N, output_H, output_W); + dim3 block(256); + if (C < block.x) { + block.x = C; + } + if (poolingType == 0) { + pooling_nhwc_element1_kernel<<>>( + ref_output.data(), + ref_input.data(), + N, + H, + W, + C, + output_H, + output_W, + kernel_H, + kernel_W, + stride_H, + stride_W, + padding_H, + padding_W); + } // if (poolingType == 0) + else { + pooling_nhwc_element1_kernel<<>>( + ref_output.data(), + ref_input.data(), + N, + H, + W, + C, + output_H, + output_W, + kernel_H, + kernel_W, + stride_H, + stride_W, + padding_H, + padding_W); + } + } + } // if (C % 2 != 0)) + else { + if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) { + dim3 grid(C/2, N); + dim3 block(256); + if (H*W < block.x){ + block.x = (H*W + 31)/32*32; + } + if (poolingType == 0) { + if (std::is_same::value) { + pooling_nxhTo1x1_element2_kernel<<>>( + (float2*)(ref_output.data()), + (const float2*)(ref_input.data()), + N, + H*W, + C); + } // if (std::is_same::value) + else { + pooling_nxhTo1x1_element2_kernel<<>>( + (half2*)(ref_output.data()), + (const half2*)(ref_input.data()), + N, + H*W, + C); + } + } // if (poolingType == 0) + else { + if (std::is_same::value) { + pooling_nxhTo1x1_element2_kernel<<>>( + (float2*)(ref_output.data()), + (const float2*)(ref_input.data()), + N, + H*W, + C); + } // if (std::is_same::value) + else { + pooling_nxhTo1x1_element2_kernel<<>>( + (half2*)(ref_output.data()), + (const half2*)(ref_input.data()), + N, + H*W, + C); + } + } + } // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) + else { + dim3 grid(N, output_H, output_W); + dim3 block(256); + if (C/2 < block.x) { + block.x = 
C/2; + } + if (poolingType == 0) { + if (std::is_same::value) { + pooling_nhwc_element2_kernel<<>>( + (float2*)(ref_output.data()), + (const float2*)(ref_input.data()), + N, + H, + W, + C/2, + output_H, + output_W, + kernel_H, + kernel_W, + stride_H, + stride_W, + padding_H, + padding_W); + } // if (std::is_same::value) + else { + pooling_nhwc_element2_kernel<<>>( + (half2*)(ref_output.data()), + (const half2*)(ref_input.data()), + N, + H, + W, + C/2, + output_H, + output_W, + kernel_H, + kernel_W, + stride_H, + stride_W, + padding_H, + padding_W); + } + } // if (poolingType == 0) + else { + if (std::is_same::value) { + pooling_nhwc_element2_kernel<<>>( + (float2*)(ref_output.data()), + (const float2*)(ref_input.data()), + N, + H, + W, + C/2, + output_H, + output_W, + kernel_H, + kernel_W, + stride_H, + stride_W, + padding_H, + padding_W); + } // if (std::is_same::value) + else { + pooling_nhwc_element2_kernel<<>>( + (half2*)(ref_output.data()), + (const half2*)(ref_input.data()), + N, + H, + W, + C/2, + output_H, + output_W, + kernel_H, + kernel_W, + stride_H, + stride_W, + padding_H, + padding_W); + } + } + } + } +} + +} //namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_to_nchw.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_to_nchw.h new file mode 100644 index 0000000000000000000000000000000000000000..d71fd1ef85ac1467b17692c639f50be4fcb3bf8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_to_nchw.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief cuda kernels to transform a device memory tensor from NHWC layout to NCHW layout. 
+ */ + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" + +namespace cutlass { + +/** \brief interface to transform a device memory tensor from NHWC layout to NCHW layout. + * \tparam T: data type + */ +template +void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size, + cutlass::Tensor4DCoord output_tensor_size, + TensorRef ref_input, + TensorRef ref_output, + cudaStream_t stream); + + +template +__global__ void nhwc_to_nchw_kernel(T *output, + const T *input, + const int n, + const int h, + const int w, + const int c) { + + const int hw = h*w; + const int hwc = hw*c; + __shared__ T shbuf[32 * (32 + 1)]; + const int32_t tid = threadIdx.y*blockDim.x + threadIdx.x; + const int32_t wid = tid / 32; + const int32_t lid = tid % 32; + const int32_t ni = blockIdx.z; + const int32_t hwi0 = blockIdx.y * 32; + const int32_t ci0 = blockIdx.x * 32; + + const size_t input_idx = ni * hwc + (hwi0 + wid) * c + ci0; + const T *A = input + input_idx; + if (ci0 + lid < c) { + const int lid_x_33 = lid * 33; + if ((hwi0 + 32) <= hw) { + int hwi = wid; // between 0 and 7 + CUTLASS_PRAGMA_UNROLL + for (int cLoopIdx = 0; cLoopIdx < 4; cLoopIdx++) { + shbuf[lid_x_33 + hwi] = A[lid]; + A = &A[8 * c]; + hwi += 8; + } + } else { + for (int hwi = wid; hwi < 32; hwi += 8) { + if ((hwi + hwi0) < hw) { + shbuf[lid_x_33 + hwi] = A[lid]; + } + A = &A[8 * c]; + } + } + } + __syncthreads(); + + const int32_t hwiOut = hwi0 + lid; + output = &output[ni * hwc + hwiOut]; + if (hwiOut < hw) { + if (ci0 + 32 < c) { + int cI = wid; + CUTLASS_PRAGMA_UNROLL + for (int hwLoopIdx = 0; hwLoopIdx < 4; ++hwLoopIdx) { + output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid]; + cI += 8; + } + } else { + for (int cI = wid; cI < 32; cI += 8) { + if (ci0 + cI < c) { + output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid]; + } + } + } + } +} + +template +void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size, 
+ cutlass::Tensor4DCoord output_tensor_size, + TensorRef ref_input, + TensorRef ref_output, + cudaStream_t stream) { + + assert( + input_tensor_size.n() == output_tensor_size.n() && + input_tensor_size.h() == output_tensor_size.c() && + input_tensor_size.w() == output_tensor_size.h() && + input_tensor_size.c() == output_tensor_size.w()); + + int n = input_tensor_size.n(); + int h = input_tensor_size.h(); + int w = input_tensor_size.w(); + int c = input_tensor_size.c(); + + dim3 grid((c + 31)/32, (h*w + 31)/32, n); + dim3 block(32, 8); + nhwc_to_nchw_kernel<<>>(ref_output.data(), ref_input.data(), + n, h, w, c); + +} + +} //namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_rmsnorm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_rmsnorm.h new file mode 100644 index 0000000000000000000000000000000000000000..5090efa0df0001ef630562c254a172fc092c2c47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_rmsnorm.h @@ -0,0 +1,185 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/util/device_utils.h" +#include + +namespace cutlass { + +__global__ void rmsnorm_twoPassAlgo_e8(float4 *output, const float4 *input, + const float4 *weight, + const int m, const int n) { + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ float s_mean; + float local_sums[1] = {0.0f}; + const int n_8 = n / 8; + int offset = m_idx * n_8; + input += offset; + output += offset; + + for (int index = tid; index < n_8; index += bdimx) { + const float4 local_val = input[index]; + const half2 *h1 = (half2 *)&local_val.x; + const half2 *h2 = (half2 *)&local_val.y; + const half2 *h3 = (half2 *)&local_val.z; + const half2 *h4 = (half2 *)&local_val.w; + 
local_sums[0] += static_cast(h1->x) * static_cast(h1->x) + + static_cast(h1->y) * static_cast(h1->y) + + static_cast(h2->x) * static_cast(h2->x) + + static_cast(h2->y) * static_cast(h2->y) + + static_cast(h3->x) * static_cast(h3->x) + + static_cast(h3->y) * static_cast(h3->y) + + static_cast(h4->x) * static_cast(h4->x) + + static_cast(h4->y) * static_cast(h4->y); + } + + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = rsqrtf(local_sums[0] / n + 1e-6); + } + __syncthreads(); + + for (int index = tid; index < n_8; index += bdimx) { + const float4 local_val = input[index]; + const float4 weight_val = weight[index]; + + const half2 *l1 = (half2 *)&local_val.x; + const half2 *l2 = (half2 *)&local_val.y; + const half2 *l3 = (half2 *)&local_val.z; + const half2 *l4 = (half2 *)&local_val.w; + + const half2 *g1 = (half2 *)&weight_val.x; + const half2 *g2 = (half2 *)&weight_val.y; + const half2 *g3 = (half2 *)&weight_val.z; + const half2 *g4 = (half2 *)&weight_val.w; + + float4 tmp; + half2 *h1 = (half2 *)&tmp.x; + half2 *h2 = (half2 *)&tmp.y; + half2 *h3 = (half2 *)&tmp.z; + half4 *h4 = (half4 *)&tmp.w; + + h1->x = half(static_cast(l1->x) * s_mean * static_cast(g1->x)); + h1->y = half(static_cast(l1->y) * s_mean * static_cast(g1->y)); + h2->x = half(static_cast(l2->x) * s_mean * static_cast(g2->x)); + h2->y = half(static_cast(l2->y) * s_mean * static_cast(g2->y)); + h3->x = half(static_cast(l3->x) * s_mean * static_cast(g3->x)); + h3->y = half(static_cast(l3->y) * s_mean * static_cast(g3->y)); + h4->x = half(static_cast(l4->x) * s_mean * static_cast(g4->x)); + h4->y = half(static_cast(l4->y) * s_mean * static_cast(g4->y)); + + output[index] = tmp; + } +} + +template +__global__ void rmsnorm_twoPassAlgo_e1(T* output, + const T* input, + const T* weight, + const int m, const int n) +{ + const int m_idx = blockIdx.x; + const int tid = threadIdx.x; + const int bdimx = blockDim.x; + __shared__ 
float s_mean; + float local_sums[1] = {0.0f}; + int offset = m_idx * n; + input += offset; + output += offset; + + for (int index = tid ; index < n ; index += bdimx){ + float local_val = static_cast(input[index]); + local_sums[0] += local_val * local_val; + } + if (blockDim.x <= 32) { + warpReduceSum(local_sums); + } + else { + blockReduceSum(local_sums); + } + if (threadIdx.x == 0) { + s_mean = rsqrtf(local_sums[0] / n + 1e-6); + } + __syncthreads(); + + for (int index = tid ; index < n ; index += bdimx){ + const T weight_val = weight[index]; + const T local_val = input[index]; + output[index] = T(static_cast(local_val) * s_mean * static_cast(weight_val)); + } +} + +template +void rmsnorm(cutlass::MatrixCoord tensor_size, + TensorRef ref_output, + TensorRef ref_input, + TensorRef ref_weight, + cudaStream_t stream){ + const int m = tensor_size.row(); + const int n = tensor_size.column(); + T* output = ref_output.data(); + const T* input = ref_input.data(); + const T* weight = ref_weight.data(); + dim3 grid(m); + + if (n % 8 == 0 && std::is_same::value) { + dim3 block(min(1024, (n / 8 + 31) / 32 * 32)); + + rmsnorm_twoPassAlgo_e8<<>>( + (float4 *)output, (const float4 *)input, (const float4 *)weight, m, n); + } else { + dim3 block(min(1024, ((n + 31)/32 + 31)/32*32)); + + rmsnorm_twoPassAlgo_e1<<>>( + output, input, weight, m, n); + } + + auto result = cudaGetLastError(); + if (result != cudaSuccess) { + std::cerr << "CUDA error: " << cudaGetErrorString(result) << std::endl; + abort(); + } +} + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_utils.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..00414a5af00be3a2d01c04651b79a4431238587d --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/device_utils.h @@ -0,0 +1,127 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief utils code for device cutlass code +*/ + +#pragma once + +#include +#include +#define FINAL_MASK 0xffffffff + +struct half4 { + half x, y, z, w; +}; + +template +__inline__ __device__ T warpReduceSum(T* val) +{ +#pragma unroll + for (int i = 0; i < NUM; i++) { +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) + val[i] += __shfl_xor_sync(FINAL_MASK, val[i], mask, 32); + } + return (T)(0.0f); +} + +template +__inline__ __device__ T blockReduceSum(T* val) +{ + __shared__ T shared[NUM][33]; + int lane = threadIdx.x & 0x1f; + int wid = threadIdx.x >> 5; + + warpReduceSum(val); + + if (lane == 0) { +#pragma unroll + for (int i = 0; i < NUM; i++) { + shared[i][wid] = val[i]; + } + } + + __syncthreads(); + + bool is_mask = threadIdx.x < (blockDim.x / 32.f); +#pragma unroll + for (int i = 0; i < NUM; i++) { + val[i] = is_mask ? shared[i][lane] : (T)(0.0f); + } + warpReduceSum(val); + return (T)0.0f; +} + +template +__inline__ __device__ T warpReduceMax(T* val) +{ +#pragma unroll + for (int i = 0; i < NUM; i++) { +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) + val[i] = max(val[i], __shfl_xor_sync(FINAL_MASK, val[i], mask, 32)); + } + return (T)(0.0f); +} + +template +__inline__ __device__ T blockReduceMax(T* val) +{ + static __shared__ T shared[32][NUM]; + int lane = threadIdx.x & 0x1f; // in-warp idx + int wid = threadIdx.x >> 5; // warp idx + + warpReduceMax(val); // get maxx in each warp + + if (lane == 0) // record in-warp maxx by warp Idx + { +#pragma unroll + for (int i = 0; i < NUM; i++) { + shared[wid][i] = val[i]; + } + } + + __syncthreads(); + + // Modify from blockDim.x << 5 to blockDim.x / 32. to prevent + // blockDim.x is not divided by 32 + bool is_mask = threadIdx.x < (blockDim.x / 32.f); +#pragma unroll + for (int i = 0; i < NUM; i++) { + val[i] = is_mask ? 
shared[lane][i] : (T)(-FLT_MAX); + } + warpReduceMax(val); + + return (T)0.0f; +} + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/distribution.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/distribution.h new file mode 100644 index 0000000000000000000000000000000000000000..d5557d952a8f832e17c3ada35e9102ef05d4d4fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/distribution.h @@ -0,0 +1,150 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +/*! \file + \brief This header contains a class to parametrize a statistical distribution function. +*/ + +#include + +namespace cutlass { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Distribution type +struct Distribution { + /// Variant types + enum Kind { Invalid, Uniform, Gaussian, Identity, Sequential, AllZeros, AllOnes }; + + /// Distribution state + union { + /// Uniform distribution + struct { + double min; + double max; + } uniform; + + /// Gaussian distribution + struct { + double mean; + double stddev; + double pnz; + double pnzA; + double pnzB; + double pnzC; + } gaussian; + + /// Elements are linear combination of row and column index + struct { + double start; + double delta; + } sequential; + }; + + /// Active variant kind + Kind kind; + + /// Random values are cast to integer after scaling by this power of two + int int_scale; + + // + // Methods + // + + Distribution() : kind(Invalid), int_scale(0) {} + + /// Configures distribution as uniform random + Distribution &set_uniform(double _min, double _max, int _int_scale = 0) { + kind = Uniform; + uniform.min = _min; + uniform.max = _max; + int_scale = _int_scale; + return *this; + } + + /// Configures distribution as Gaussian distribution + Distribution &set_gaussian(double _mean, 
double _stddev, int _int_scale = 0, double _pnz = 100.0) { + kind = Gaussian; + gaussian.mean = _mean; + gaussian.stddev = _stddev; + gaussian.pnz = _pnz; + int_scale = _int_scale; + return *this; + } + + /// Sets identity + Distribution &set_identity() { + kind = Identity; + return *this; + } + + /// Sets sequential + Distribution &set_sequential(double start, double delta, int _int_scale = 0) { + kind = Sequential; + sequential.start = start; + sequential.delta = delta; + int_scale = _int_scale; + return *this; + } +}; + +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Prints a Distribution to ostream +inline std::ostream &operator<<(std::ostream &out, cutlass::Distribution const &dist) { + switch (dist.kind) { + case cutlass::Distribution::Uniform: + out << "uniform, min: " << dist.uniform.min << ", max: " << dist.uniform.max; + break; + case cutlass::Distribution::Gaussian: + out << "gaussian, mean: " << dist.gaussian.mean << ", stddev: " << dist.gaussian.stddev + << ", pnzA: " << dist.gaussian.pnzA << ", pnzB: " + << dist.gaussian.pnzB << ", pnzC: " << dist.gaussian.pnzC; + break; + case cutlass::Distribution::Identity: + out << "identity"; + break; + case cutlass::Distribution::Sequential: + out << "sequential"; + break; + default: + out << "unknown"; + } + + out << ", int_scale: " << dist.int_scale; + + return out; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/exceptions.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..a349d49e892044f5478f9f34e0cf3e8855fb5903 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/exceptions.h @@ 
-0,0 +1,69 @@ +/****************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +/** + * \file + * \brief C++ exception semantics for CUDA error codes + */ + +#include +#include +#include + +#include "cutlass/platform/platform.h" + +namespace cutlass { + +/// C++ exception wrapper for CUDA \p cudaError_t +class cuda_exception : public std::exception { + public: + /// Constructor + cuda_exception(const char* msg = "", cudaError_t err = cudaErrorUnknown) : msg(msg), err(err) {} + + /// Returns the underlying CUDA \p cudaError_t + cudaError_t cudaError() const { return err; } + + protected: + /// Explanatory string + const char* msg; + + /// Underlying CUDA \p cudaError_t + cudaError_t err; +}; + +/// Writes a cuda_exception instance to an output stream +inline std::ostream& operator<<(std::ostream& out, cuda_exception const& e) { + return out << e.what() << ": " << cudaGetErrorString(e.cudaError()); +} + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/gett_commandline.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/gett_commandline.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d5be80dd02d1e76d50247cd8317297c343da3472 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/gett_commandline.hpp @@ -0,0 +1,369 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief GETT command line parser to gather semantic modes, their stride order, and extents. 
+*/ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cutlass/util/command_line.h" + +namespace cutlass { + +// Output shortcuts +std::ostream& operator<<(std::ostream& os, std::vector data) { + for (auto& a : data) os << a; + return os; +} + +template +std::ostream& operator<<(std::ostream& os, std::vector data) { + for (auto& a : data) os << a << " "; + return os; +} + +struct GettCommandLine { + struct GettProblem { + using extent_type = int; + using stride_type = int64_t; + + // Row modes: appear in A and C/D + std::vector M; + std::vector ldAm; + std::vector ldCm; + + // Column modes: appear in B and C/D + std::vector N; + std::vector ldBn; + std::vector ldCn; + + // Reduction modes: appear in A and B + std::vector K; + std::vector ldAk; + std::vector ldBk; + + // Batch modes: appear in all in/out tensors + std::vector L; + std::vector ldAl; + std::vector ldBl; + std::vector ldCl; + }; + + static GettProblem + parse(int argc, char const* argv[], bool parse_verbose = false) { + using extent_type = typename GettProblem::extent_type; + using stride_type = typename GettProblem::stride_type; + + cutlass::CommandLine cmd(argc, argv); + + // modeA + std::vector a_mode; + cmd.get_cmd_line_arguments("modeA", a_mode); + + // modeB + std::vector b_mode; + cmd.get_cmd_line_arguments("modeB", b_mode); + + // modeC + std::vector c_mode; + cmd.get_cmd_line_arguments("modeC", c_mode); + + + // mode_sizes + std::map mode_size; + // First, initialize all modes in a, b, c to make sure they're in map + for (char a : a_mode) mode_size[a] = 1; + for (char b : b_mode) mode_size[b] = 1; + for (char c : c_mode) mode_size[c] = 1; + + // Then, overwrite the ones in -extent + std::vector > extent_tokens; + cmd.get_cmd_line_argument_pairs("extents", extent_tokens); + for (auto e : extent_tokens) { + if (std::get<0>(e).size() > 1) { + std::cerr << "ERROR: Mode name must only be 1 character long.\n"; + print_usage(); + exit(1); + } 
+ char label = std::get<0>(e)[0]; + int size = std::stoi(std::get<1>(e)); + mode_size[label] = size; + } + + // Print out symbolic modes and their extents + if (parse_verbose) { + std::cout << "C_" << c_mode << " = A_" << a_mode << " * B_" << b_mode << "\n"; + for (auto e : mode_size) std::cout << " " << std::get<0>(e) << " : " << std::get<1>(e) << "\n"; + } + + // + // Collect/Compute strides + // + + std::map mode_ldA; + std::map mode_ldB; + std::map mode_ldC; + + { + stride_type current; + + current = 1; + for (char a : a_mode) { mode_ldA[a] = current; current *= mode_size[a]; } + + current = 1; + for (char b : b_mode) { mode_ldB[b] = current; current *= mode_size[b]; } + + current = 1; + for (char c : c_mode) { mode_ldC[c] = current; current *= mode_size[c]; } + } + + // + // Collect mode categories + // + + std::vector row_mode; // rows + std::vector col_mode; // columns + std::vector red_mode; // reductions + std::vector bat_mode; // batches + + { + std::vector a_label = a_mode; + std::vector b_label = b_mode; + std::vector c_label = c_mode; + + std::sort(std::begin(a_label), std::end(a_label)); + std::sort(std::begin(b_label), std::end(b_label)); + std::sort(std::begin(c_label), std::end(c_label)); + + // std::set_intersections to find semantic category of each symbolic mode + std::set_intersection(std::begin(a_label), std::end(a_label), + std::begin(c_label), std::end(c_label), + std::back_inserter(row_mode)); + + std::set_intersection(std::begin(b_label), std::end(b_label), + std::begin(c_label), std::end(c_label), + std::back_inserter(col_mode)); + + std::set_intersection(std::begin(a_label), std::end(a_label), + std::begin(b_label), std::end(b_label), + std::back_inserter(red_mode)); + + std::set_intersection(std::begin(row_mode), std::end(row_mode), + std::begin(col_mode), std::end(col_mode), + std::back_inserter(bat_mode)); + + // std::set_difference to remove batch modes from other semantic modes + for (char l : bat_mode) { + 
row_mode.erase(std::remove(std::begin(row_mode), std::end(row_mode), l), std::end(row_mode)); + col_mode.erase(std::remove(std::begin(col_mode), std::end(col_mode), l), std::end(col_mode)); + red_mode.erase(std::remove(std::begin(red_mode), std::end(red_mode), l), std::end(red_mode)); + } + } + + // Print out the semantic association of each symbolic mode + if (parse_verbose) { + std::cout << " rows : " << row_mode << '\n'; + std::cout << " cols : " << col_mode << '\n'; + std::cout << " reds : " << red_mode << '\n'; + std::cout << " bats : " << bat_mode << '\n'; + } + + // + // Permute modes + // + + // Permute the batched modes to promote coalescing + // Sort the batched modes by min(ldAl,ldBl) and in case of a tie by the size + std::sort(std::begin(bat_mode), std::end(bat_mode), [&](char l1, char l2) { + return std::tie(std::min(mode_ldA[l1],mode_ldB[l1]),mode_size[l1]) + < std::tie(std::min(mode_ldA[l2],mode_ldB[l2]),mode_size[l2]); + }); + // Compute sizes and strides of ordered reduction modes + std::vector L; + std::vector ldAl; + std::vector ldBl; + std::vector ldCl; + for (char l : bat_mode) { + L.push_back(mode_size[l]); + ldAl.push_back(mode_ldA[l]); + ldBl.push_back(mode_ldB[l]); + ldCl.push_back(mode_ldC[l]); + } + + // Permute the reduction modes to promote coalescing + // Sort the reduction modes by min(ldAk,ldBk) and in case of a tie by the size + std::sort(std::begin(red_mode), std::end(red_mode), [&](char k1, char k2) { + return std::tie(std::min(mode_ldA[k1],mode_ldB[k1]),mode_size[k1]) + < std::tie(std::min(mode_ldA[k2],mode_ldB[k2]),mode_size[k2]); + }); + // Compute sizes and strides of ordered reduction modes + std::vector K; + std::vector ldAk; + std::vector ldBk; + for (char k : red_mode) { + K.push_back(mode_size[k]); + ldAk.push_back(mode_ldA[k]); + ldBk.push_back(mode_ldB[k]); + } + + // Permute the row modes to promote coalescing + // Sort the row modes by min(ldAm,ldCm) and in case of a tie by ldAm + std::sort(std::begin(row_mode), 
std::end(row_mode), [&](char m1, char m2) { + return std::tie(std::min(mode_ldA[m1],mode_ldC[m1]),mode_ldA[m1]) + < std::tie(std::min(mode_ldA[m2],mode_ldC[m2]),mode_ldA[m2]); + }); + // Compute sizes and strides of ordered row modes + std::vector M; + std::vector ldAm; + std::vector ldCm; + for (char m : row_mode) { + M.push_back(mode_size[m]); + ldAm.push_back(mode_ldA[m]); + ldCm.push_back(mode_ldC[m]); + } + + // Permute the col modes to promote coalescing + // Sort the col modes by min(ldBn,ldCn) and in case of a tie by ldBn + std::sort(std::begin(col_mode), std::end(col_mode), [&](char n1, char n2) { + return std::tie(std::min(mode_ldB[n1],mode_ldC[n1]),mode_ldB[n1]) + < std::tie(std::min(mode_ldB[n2],mode_ldC[n2]),mode_ldB[n2]); + }); + // Compute sizes and strides of ordered col modes + std::vector N; + std::vector ldBn; + std::vector ldCn; + for (char n : col_mode) { + N.push_back(mode_size[n]); + ldBn.push_back(mode_ldB[n]); + ldCn.push_back(mode_ldC[n]); + } + + if (parse_verbose) { + std::cout << "C_"; + if (! row_mode.empty()) { + std::cout << "(" << row_mode << ")"; + } + if (! col_mode.empty()) { + std::cout << "(" << col_mode << ")"; + } + if (! bat_mode.empty()) { + std::cout << "(" << bat_mode << ")"; + } + std::cout << " = A_"; + if (! row_mode.empty()) { + std::cout << "(" << row_mode << ")"; + } + if (! red_mode.empty()) { + std::cout << "(" << red_mode << ")"; + } + if (! bat_mode.empty()) { + std::cout << "(" << bat_mode << ")"; + } + std::cout << " * B_"; + if (! col_mode.empty()) { + std::cout << "(" << col_mode << ")"; + } + if (! red_mode.empty()) { + std::cout << "(" << red_mode << ")"; + } + if (! 
bat_mode.empty()) { + std::cout << "(" << bat_mode << ")"; + } + std::cout << '\n'; + + int M_size = std::accumulate(std::begin(M), std::end(M), 1, std::multiplies<>{}); + int N_size = std::accumulate(std::begin(N), std::end(N), 1, std::multiplies<>{}); + int K_size = std::accumulate(std::begin(K), std::end(K), 1, std::multiplies<>{}); + int L_size = std::accumulate(std::begin(L), std::end(L), 1, std::multiplies<>{}); + + std::cout << " M : (" << M_size << ") "; + for (char m : row_mode) std::cout << m << ":" << mode_size[m] << " "; + std::cout << '\n'; + std::cout << " N : (" << N_size << ") "; + for (char n : col_mode) std::cout << n << ":" << mode_size[n] << " "; + std::cout << '\n'; + std::cout << " K : (" << K_size << ") "; + for (char k : red_mode) std::cout << k << ":" << mode_size[k] << " "; + std::cout << '\n'; + std::cout << " L : (" << L_size << ") "; + for (char l : bat_mode) std::cout << l << ":" << mode_size[l] << " "; + std::cout << '\n'; + + std::cout << " ldAm : " << ldAm << '\n'; + std::cout << " ldAk : " << ldAk << '\n'; + std::cout << " ldAl : " << ldAl << '\n'; + std::cout << " ldBn : " << ldBn << '\n'; + std::cout << " ldBk : " << ldBk << '\n'; + std::cout << " ldBl : " << ldBl << '\n'; + std::cout << " ldCm : " << ldCm << '\n'; + std::cout << " ldCn : " << ldCn << '\n'; + std::cout << " ldCl : " << ldCl << '\n'; + } + + return {M, ldAm, ldCm, + N, ldBn, ldCn, + K, ldAk, ldBk, + L, ldAl, ldBl, ldCl}; + } + + static void + print_usage() { + std::cout << + "GETT problem command line parser:\n" + " --modeA=\n" + " A comma delimited list of characters that correspond to the row, reduction, and batch modes in A tensor.\n" + " The semantic association of each symbolic mode is determined automatically.\n\n" + + " --modeB=\n" + " A comma delimited list of characters that correspond to the column, reduction, and batch modes in B tensor.\n" + " The semantic association of each symbolic mode is determined automatically.\n\n" + + " --modeC=\n" + " A comma 
delimited list of characters that correspond to the row, column, and batch modes in B tensor.\n" + " The semantic association of each symbolic mode is determined automatically.\n\n" + + " --extents=\n" + " A command delimited list of symbolic mode and its corresponding extent.\n" + " Extents are defaulted to 1 if any are not provided.\n\n" + + "Example usage: gett.exe --modeC=m,n,l --modeA=m,k,l --modeB=k,n,l --extents=m:4096,n:4096,k:4096\n"; + } +}; + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/helper_cuda.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/helper_cuda.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d840db56e83f9e345592105fb3586dbabaeab53c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/helper_cuda.hpp @@ -0,0 +1,116 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include + +#include + +namespace cute +{ + +void +device_init(int device_id, bool quiet = false) +{ + cudaDeviceProp device_prop; + std::size_t device_free_physmem; + std::size_t device_total_physmem; + + CUTE_CHECK_ERROR(cudaSetDevice(device_id)); + CUTE_CHECK_ERROR(cudaMemGetInfo(&device_free_physmem, &device_total_physmem)); + CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id)); + + if (device_prop.major < 1) { + fprintf(stderr, "Device does not support CUDA.\n"); + exit(1); + } + + //float device_giga_bandwidth = float(device_prop.memoryBusWidth) * device_prop.memoryClockRate * 2 / 8 / 1000 / 1000; + + if (!quiet) { + printf("Using device %d: %s (SM%d, %d SMs)\n", + device_id, device_prop.name, + device_prop.major * 10 + device_prop.minor, + device_prop.multiProcessorCount); + fflush(stdout); + } +} + +/** + * Convert the SM version (e.g. v7.0, v7.5) to the physical number of cores. 
+ */ +inline int +_ConvertSMVer2Cores(int major, int minor) +{ + // Defines for GPU Architecture types (using the SM version to determine + // the # of cores per SM + typedef struct { + int SM; // 0xMm (hexadecimal notation), M = SM Major version, + // and m = SM minor version + int Cores; + } sSMtoCores; + + sSMtoCores nGpuArchCoresPerSM[] = { + {0x30, 192}, + {0x32, 192}, + {0x35, 192}, + {0x37, 192}, + {0x50, 128}, + {0x52, 128}, + {0x53, 128}, + {0x60, 64}, + {0x61, 128}, + {0x62, 128}, + {0x70, 64}, + {0x72, 64}, + {0x75, 64}, + {-1, -1}}; + + int index = 0; + + while (nGpuArchCoresPerSM[index].SM != -1) { + if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { + return nGpuArchCoresPerSM[index].Cores; + } + index++; + } + + // If we don't find the values, we default use the previous one + // to run properly + printf("MapSMtoCores for SM %d.%d is undefined." + " Default to use %d Cores/SM\n", + major, minor, nGpuArchCoresPerSM[index - 1].Cores); + + return nGpuArchCoresPerSM[index - 1].Cores; +} + +} // end namespace cute diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_reorder.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_reorder.h new file mode 100644 index 0000000000000000000000000000000000000000..c17c0a2749b38861b518cc42e2ce3253b51f32ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_reorder.h @@ -0,0 +1,111 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief reorder data from the host side +*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/tensor_view.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/gemm.h" + +namespace cutlass { + +/// This is needed for the interleaved integer tensor core kernels. The purpose +/// is to use skip the shared memory part in the epilogue. 
+template +void reorder_column(TensorRef dest, + TensorRef src, + cutlass::gemm::GemmCoord problem_size) { + const int InstructionShapeCol = 8; + // 4 threads per Quad + const int ElementsPerThread = InstructionShapeCol / 4; + // 4 threads per Quad + const int ReorderedElementsPerThread = + Interleaved / 4; + + for (int n = 0; n < problem_size.n(); n++) { + for (int k = 0; k < problem_size.k(); k++) { + dest.at({k, (n / Interleaved) * Interleaved + + ((n % ReorderedElementsPerThread) / ElementsPerThread) * + InstructionShapeCol + + ((n % Interleaved) / ReorderedElementsPerThread) * + ElementsPerThread + + (n % ElementsPerThread)}) = src.at({k, n}); + } + } +} + +template +void reorder_convK(TensorRef dest, + TensorRef src, + cutlass::gemm::GemmCoord problem_size) { + + TensorRef> mappedDest(dest.data(), dest.stride(0)); + TensorRef> mappedSrc(src.data(), src.stride(0)); + + reorder_column( + mappedDest, mappedSrc, problem_size); +} + +/// This is needed for the sparse tensor core kernels. The purpose +/// is to use ldmatrix to load from shared memory to the register file. +template +void reorder_meta(TensorRef dest, + TensorRef src, + cutlass::gemm::GemmCoord problem_size) { + for (int m = 0; m < problem_size.m(); m++) { + for (int k = 0; k < problem_size.k(); k++) { + // First reorder the rows. + int group = (sizeof(Element) == 2) ? 32 : 16; + int interweave = (sizeof(Element) == 2) ? 4 : 2; + + int dest_row = m / group * group + (m % 8) * interweave + (m % group) / 8; + int dest_col = k; + + // Next swizzle the 2x2 blocks from Z to N. 
+ if (((dest_row % 2) == 0) && ((dest_col % 2) == 1)) { + ++dest_row; + --dest_col; + } else if (((dest_row % 2) == 1) && ((dest_col % 2) == 0)) { + --dest_row; + ++dest_col; + } + + dest.at({dest_row, dest_col}) = src.at({m, k}); + } + } +} +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..4b2b8d152bbb68aae38efe6f05a6d20c85d93c29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor.h @@ -0,0 +1,522 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +/*! \file + \brief HostTensor contributes management for both host and device memory. + + HostTensor allocates host and device memory upon construction. Basic element-wise operations on + host memory synchronize device memory automatically. Explicit copy operations provide abstractions + for CUDA memcpy operations. + + Call {host, device}_{data, ref, view}() for accessing host or device memory. + + See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details. 
+*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/fast_math.h" + +#include "device_memory.h" + +namespace cutlass { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Host tensor +template < + /// Data type of element stored within tensor (concept: NumericType) + typename Element_, + /// Defines a mapping from logical coordinate to linear memory (concept: Layout) + typename Layout_ +> +class HostTensor { +public: + + /// Data type of individual access + using Element = Element_; + + /// Mapping function from logical coordinate to linear memory + using Layout = Layout_; + + /// Logical rank of tensor index space + static int const kRank = Layout::kRank; + + /// Index type + using Index = typename Layout::Index; + + /// Long index used for pointer offsets + using LongIndex = typename Layout::LongIndex; + + /// Coordinate in logical tensor space + using TensorCoord = typename Layout::TensorCoord; + + /// Layout's stride vector + using Stride = typename Layout::Stride; + + /// Tensor reference to device memory + using TensorRef = TensorRef; + + /// Tensor reference to constant device memory + using ConstTensorRef = typename TensorRef::ConstTensorRef; + + /// Tensor reference to device memory + using TensorView = TensorView; + + /// Tensor reference to constant device memory + using ConstTensorView = typename TensorView::ConstTensorView; + + /// Reference to element in tensor + using Reference = typename TensorRef::Reference; + + /// Constant reference to element in tensor + using ConstReference = typename ConstTensorRef::Reference; + + /// Note: Below is used to handle packing of subbyte elements + /// kBitsStoredVec : The bits of store vec that could be divisiable by the element + /// kElementsPerStoredVec : The number of elements could be stored in per store vec + /// kNumStoragePerStoredVec : How much storage(i.e. 
sizeof(element storage)) the store vec needs to consume. + /// Usually the element storage of subbyte is uint8_t. + /// Example + /// int2: kBitsStoredVec = 8; kElementsPerStoredVec = 4; kNumStoragePerStoredVec = 1 uint8_t; + /// int4: kBitsStoredVec = 8; kElementsPerStoredVec = 2; kNumStoragePerStoredVec = 1 uint8_t; + static int const kBitsStoredVec = (sizeof_bits::value < 8) ? cutlass::lcm(sizeof_bits::value, 8) : sizeof_bits::value; + static int const kElementsPerStoredVec = kBitsStoredVec / sizeof_bits::value; + static int const kNumStoragePerStoredVec = kBitsStoredVec / (sizeof(Element) * 8); + + private: + + // + // Data members + // + + /// Extent of tensor in logical dimensions + TensorCoord extent_; + + /// Layout object + Layout layout_; + + /// Host-side memory allocation + std::vector host_; + + /// Device-side memory + device_memory::allocation device_; + + public: + // + // Device and Host Methods + // + + /// Default constructor + HostTensor() {} + + /// Constructs a tensor given an extent. 
Assumes a packed layout + HostTensor( + TensorCoord const &extent, + bool device_backed = true + ) { + + this->reset(extent, Layout::packed(extent), device_backed); + } + + /// Constructs a tensor given an extent and layout + HostTensor( + TensorCoord const &extent, + Layout const &layout, + bool device_backed = true + ) { + + this->reset(extent, layout, device_backed); + } + + ~HostTensor() { } + + /// Clears the HostTensor allocation to size/capacity = 0 + void reset() { + extent_ = TensorCoord(); + layout_ = Layout::packed(extent_); + + host_.clear(); + device_.reset(); + } + + /// Resizes internal memory allocations without affecting layout or extent + void reserve( + size_t count, ///< size of tensor in elements + bool device_backed_ = true) { ///< if true, device memory is also allocated + + device_.reset(); + host_.clear(); + + count = count / kElementsPerStoredVec * kNumStoragePerStoredVec; + host_.resize(count); + + // Allocate memory + Element* device_memory = nullptr; + if (device_backed_) { + device_memory = device_memory::allocate(count); + } + device_.reset(device_memory, device_backed_ ? count : 0); + } + + /// Updates the extent and layout of the HostTensor. Allocates memory according to the new + /// extent and layout. + void reset( + TensorCoord const &extent, ///< extent of logical tensor + Layout const &layout, ///< layout object of tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + extent_ = extent; + layout_ = layout; + + reserve(size_t(layout_.capacity(extent_)), device_backed_); + } + + /// Updates the extent and layout of the HostTensor. Allocates memory according to the new + /// extent and layout. Assumes a packed tensor configuration. + void reset( + TensorCoord const &extent, ///< extent of logical tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + reset(extent, Layout::packed(extent), device_backed_); + } + + /// Changes the size of the logical tensor. 
Only allocates memory if new capacity exceeds reserved capacity. + /// To force allocation, call reset(). + void resize( + TensorCoord const &extent, ///< extent of logical tensor + Layout const &layout, ///< layout object of tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + extent_ = extent; + layout_ = layout; + + LongIndex new_size = size_t(layout_.capacity(extent_)); + + if (static_cast(new_size) > host_.size()) { + reserve(new_size, device_backed_); + } + } + + /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. + /// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration. + void resize( + TensorCoord const &extent, ///< extent of logical tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + resize(extent, Layout::packed(extent), device_backed_); + } + + /// Returns the number of elements stored in the host tensor + size_t size() const { + return host_.size() / kNumStoragePerStoredVec * kElementsPerStoredVec; + } + + /// Returns the logical capacity based on extent and layout. May differ from size(). 
+ LongIndex capacity() const { + return layout_.capacity(extent_); + } + + /// Gets pointer to host data + Element * host_data() { return host_.data(); } + + /// Gets pointer to host data with a pointer offset + Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory::get(host_.data(), ptr_element_offset); } + + /// Gets a reference to an element in host memory + Reference host_data(LongIndex idx) { + return ReferenceFactory::get(host_data(), idx); + } + + /// Gets pointer to host data + Element const * host_data() const { return host_.data(); } + + /// Gets pointer to host data with a pointer offset + Element const * host_data_ptr_offset(LongIndex ptr_element_offset) const { return &ReferenceFactory::get(host_.data(), ptr_element_offset); } + + /// Gets a constant reference to an element in host memory + ConstReference host_data(LongIndex idx) const { + return ReferenceFactory::get(host_data(), idx); + } + + /// Gets pointer to device data + Element * device_data() { return device_.get(); } + + /// Gets pointer to device data + Element const * device_data() const { return device_.get(); } + + /// Gets pointer to device data with a pointer offset + Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory::get(device_data(), ptr_element_offset); } + + /// Gets pointer to device data with a pointer offset + Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return &ReferenceFactory::get(device_data(), ptr_element_offset); } + + /// Accesses the tensor reference pointing to data + TensorRef host_ref(LongIndex ptr_element_offset=0) { return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_); } + + /// Accesses the tensor reference pointing to data + ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_); } + + /// Accesses the tensor reference pointing to data + TensorRef 
device_ref(LongIndex ptr_element_offset=0) { + return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_); + } + + /// Accesses the tensor reference pointing to data + ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const { + return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_); + } + + /// Accesses the tensor reference pointing to data + TensorView host_view(LongIndex ptr_element_offset=0) { + return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_); + } + + /// Accesses the tensor reference pointing to data + ConstTensorView host_view(LongIndex ptr_element_offset=0) const { + return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_); + } + + /// Accesses the tensor reference pointing to data + TensorView device_view(LongIndex ptr_element_offset=0) { + return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_); + } + + /// Accesses the tensor reference pointing to data + ConstTensorView device_view(LongIndex ptr_element_offset=0) const { + return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_); + } + + /// Returns true if device memory is allocated + bool device_backed() const { + return (device_.get() == nullptr) ? 
false : true; + } + + + /// Returns the layout object + Layout & layout() { + return layout_; + } + + /// Returns the layout object + Layout layout() const { + return layout_; + } + + /// Returns the layout object's stride vector + Stride stride() const { + return layout_.stride(); + } + + /// Returns the layout object's stride vector + Stride & stride() { + return layout_.stride(); + } + + /// Returns the layout object's stride in a given physical dimension + LongIndex stride(int dim) const { + return layout_.stride().at(dim); + } + + /// Returns the layout object's stride in a given physical dimension + LongIndex & stride(int dim) { + return layout_.stride().at(dim); + } + + /// Computes the offset of an index from the origin of the tensor + LongIndex offset(TensorCoord const& coord) const { + return layout_(coord); + } + + /// Returns a reference to the element at the logical Coord in host memory + Reference at(TensorCoord const& coord) { + return host_data(offset(coord)); + } + + /// Returns a const reference to the element at the logical Coord in host memory + ConstReference at(TensorCoord const& coord) const { + return host_data(offset(coord)); + } + + /// Returns the extent of the tensor + TensorCoord extent() const { + return extent_; + } + + /// Returns the extent of the tensor + TensorCoord & extent() { + return extent_; + } + + /// Copies data from device to host + void sync_host() { + if (device_backed()) { + device_memory::copy_to_host( + host_data(), device_data(), size()); + } + } + + /// Copies data from host to device + void sync_device() { + if (device_backed()) { + device_memory::copy_to_device( + device_data(), host_data(), size()); + } + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_device_to_host( + Element const* ptr_device, ///< source device memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
+ + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_to_host( + host_data(), ptr_device, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_device_to_device( + Element const* ptr_device, ///< source device memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_device_to_device( + device_data(), ptr_device, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_host_to_device( + Element const* ptr_host, ///< source host memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_to_device( + device_data(), ptr_host, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_host_to_host( + Element const* ptr_host, ///< source host memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_host_to_host( + host_data(), ptr_host, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_device_to_host( + Element * ptr_host, ///< source device memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
+ + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_to_host( + ptr_host, device_data(), count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_device_to_device( + Element * ptr_device, ///< source device memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_device_to_device( + ptr_device, device_data(), count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_host_to_device( + Element * ptr_device, ///< source host memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_to_device( + ptr_device, host_data(), count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_host_to_host( + Element * ptr_host, ///< source host memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
+ + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + device_memory::copy_host_to_host( + ptr_host, host_data(), count); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..c548d9c01e6205b4873d163d6b29ccde55950885 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h @@ -0,0 +1,591 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +/*! \file + \brief HostTensor contributes management for both host and device memory. + + HostTensor allocates host and device memory upon construction. Basic element-wise operations on + host memory synchronize device memory automatically. Explicit copy operations provide abstractions + for CUDA memcpy operations. + + Call {host, device}_{data, ref, view}() for accessing host or device memory. + + See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details. 
+*/ + +#include + +#include "cutlass/cutlass.h" + +#include "cutlass/tensor_ref_planar_complex.h" +#include "cutlass/tensor_view_planar_complex.h" + +#include "device_memory.h" + +namespace cutlass { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Host tensor +template < + /// Data type of element stored within tensor (concept: NumericType) + typename Element_, + /// Defines a mapping from logical coordinate to linear memory (concept: Layout) + typename Layout_ +> +class HostTensorPlanarComplex { +public: + + /// Data type of individual access + using Element = Element_; + + /// Mapping function from logical coordinate to linear memory + using Layout = Layout_; + + /// Logical rank of tensor index space + static int const kRank = Layout::kRank; + + /// Index type + using Index = typename Layout::Index; + + /// Long index used for pointer offsets + using LongIndex = typename Layout::LongIndex; + + /// Coordinate in logical tensor space + using TensorCoord = typename Layout::TensorCoord; + + /// Layout's stride vector + using Stride = typename Layout::Stride; + + /// Tensor reference to device memory + using TensorRef = TensorRefPlanarComplex; + + /// Tensor reference to constant device memory + using ConstTensorRef = typename TensorRef::ConstTensorRef; + + /// Tensor reference to device memory + using TensorView = TensorViewPlanarComplex; + + /// Tensor reference to constant device memory + using ConstTensorView = typename TensorView::ConstTensorView; + + /// Reference to element in tensor + using Reference = typename TensorRef::Reference; + + /// Constant reference to element in tensor + using ConstReference = typename ConstTensorRef::Reference; + + private: + + // + // Data members + // + + /// Extent of tensor in logical dimensions + TensorCoord extent_; + + /// Layout object + Layout layout_; + + /// Host-side memory allocation + std::vector host_; + + /// Device-side memory + 
device_memory::allocation device_; + + public: + // + // Device and Host Methods + // + + /// Default constructor + HostTensorPlanarComplex() {} + + /// Constructs a tensor given an extent. Assumes a packed layout + HostTensorPlanarComplex( + TensorCoord const &extent, + bool device_backed = true + ) { + + this->reset(extent, Layout::packed(extent), device_backed); + } + + /// Constructs a tensor given an extent and layout + HostTensorPlanarComplex( + TensorCoord const &extent, + Layout const &layout, + bool device_backed = true + ) { + + this->reset(extent, layout, device_backed); + } + + ~HostTensorPlanarComplex() { } + + /// Clears the HostTensor allocation to size/capacity = 0 + void reset() { + extent_ = TensorCoord(); + layout_ = Layout::packed(extent_); + + host_.clear(); + device_.reset(); + } + + /// Resizes internal memory allocations without affecting layout or extent + void reserve( + size_t count, ///< size of tensor in elements + bool device_backed_ = true) { ///< if true, device memory is also allocated + + device_.reset(); + host_.clear(); + + host_.resize(count * 2); + + // Allocate memory + Element* device_memory = nullptr; + if (device_backed_) { + device_memory = device_memory::allocate(count * 2); + } + device_.reset(device_memory, device_backed_ ? count * 2 : 0); + } + + /// Updates the extent and layout of the HostTensor. Allocates memory according to the new + /// extent and layout. + void reset( + TensorCoord const &extent, ///< extent of logical tensor + Layout const &layout, ///< layout object of tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + extent_ = extent; + layout_ = layout; + + reserve(size_t(layout_.capacity(extent_)), device_backed_); + } + + /// Updates the extent and layout of the HostTensor. Allocates memory according to the new + /// extent and layout. Assumes a packed tensor configuration. 
+ void reset( + TensorCoord const &extent, ///< extent of logical tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + reset(extent, Layout::packed(extent), device_backed_); + } + + /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. + /// To force allocation, call reset(). + void resize( + TensorCoord const &extent, ///< extent of logical tensor + Layout const &layout, ///< layout object of tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + extent_ = extent; + layout_ = layout; + + LongIndex new_size = size_t(layout_.capacity(extent_)); + + if (static_cast(new_size * 2) > host_.size()) { + reserve(new_size); + } + } + + /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. + /// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration. + void resize( + TensorCoord const &extent, ///< extent of logical tensor + bool device_backed_ = true) { ///< if true, device memory is also allocated. + + resize(extent, Layout::packed(extent), device_backed_); + } + + /// Returns the number of elements stored in the host tensor + size_t size() const { + return host_.size() / 2; + } + + /// Returns the logical capacity based on extent and layout. May differ from size(). 
+ LongIndex capacity() const { + return layout_.capacity(extent_); + } + + /// Stride between real and imaginary parts + LongIndex imaginary_stride() const { + return host_.size() / 2; + } + + /// Gets pointer to host data + Element * host_data() { return host_.data(); } + + /// Gets pointer to host data imaginary part + Element * host_data_imag() { return host_.data() + imaginary_stride(); } + + /// Gets pointer to host data with a pointer offset + Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return host_data() + ptr_element_offset; } + + /// Gets pointer to host data with a pointer offset + Element * host_data_imag_ptr_offset(LongIndex ptr_element_offset) { return host_data_imag() + ptr_element_offset; } + + /// Gets a reference to an element in host memory + Reference host_data(LongIndex idx) { + return PlanarComplexReference(host_data() + idx, host_data_imag() + idx); + } + + /// Gets pointer to host data + Element const * host_data() const { return host_.data(); } + + /// Gets pointer to host data imaginary part + Element const * host_data_imag() const { return host_.data() + imaginary_stride(); } + + /// Gets a constant reference to an element in host memory + ConstReference host_data(LongIndex idx) const { + return PlanarComplexReference(host_data() + idx, host_data_imag() + idx); + } + + /// Gets pointer to device data + Element * device_data() { return device_.get(); } + + /// Gets pointer to device data with a pointer offset + Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return device_.get() + ptr_element_offset; } + + /// Gets pointer to device data + Element const * device_data() const { return device_.get(); } + + /// Gets pointer to device data with a pointer offset + Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return device_.get() + ptr_element_offset; } + + /// Gets a pointer to the device data imaginary part + Element * device_data_imag() { return device_.get() + 
imaginary_stride(); } + + /// Accesses the tensor reference pointing to data + TensorRef host_ref(LongIndex ptr_element_offset=0) { + return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); + } + + /// Returns a tensor reference to the real part of the tensor + cutlass::TensorRef host_ref_real() { + return cutlass::TensorRef(host_data(), layout_); + } + + /// Returns a tensor reference to the real part of the tensor + cutlass::TensorRef host_ref_imag() { + return cutlass::TensorRef(host_data_ptr_offset(imaginary_stride()), layout_); + } + + /// Accesses the tensor reference pointing to data + ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { + return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); + } + + /// Accesses the tensor reference pointing to data + TensorRef device_ref(LongIndex ptr_element_offset=0) { + return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); + } + + /// Accesses the tensor reference pointing to data + ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const { + return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); + } + + /// Returns a tensor reference to the real part of the tensor + cutlass::TensorRef device_ref_real() { + return cutlass::TensorRef(device_data(), layout_); + } + + /// Returns a tensor reference to the real part of the tensor + cutlass::TensorRef device_ref_imag() { + return cutlass::TensorRef(device_data_ptr_offset(imaginary_stride()), layout_); + } + + /// Accesses the tensor reference pointing to data + TensorView host_view(LongIndex ptr_element_offset=0) { + return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); + } + + /// Accesses the tensor reference pointing to data + ConstTensorView host_view(LongIndex ptr_element_offset=0) const { + return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, 
imaginary_stride(), extent_); + } + + /// Accesses the tensor reference pointing to data + cutlass::TensorView host_view_real() { + return cutlass::TensorView(host_data(), layout_, extent_); + } + + /// Accesses the tensor reference pointing to data + cutlass::TensorView host_view_imag() { + return cutlass::TensorView(host_data_ptr_offset(imaginary_stride()), layout_, extent_); + } + + /// Accesses the tensor reference pointing to data + TensorView device_view(LongIndex ptr_element_offset=0) { + return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); + } + + /// Accesses the tensor reference pointing to data + ConstTensorView device_view(LongIndex ptr_element_offset=0) const { + return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); + } + + /// Accesses the tensor reference pointing to data + cutlass::TensorView device_view_real() { + return cutlass::TensorView(device_data(), layout_, extent_); + } + + /// Accesses the tensor reference pointing to data + cutlass::TensorView device_view_imag() { + return cutlass::TensorView(device_data_ptr_offset(imaginary_stride()), layout_, extent_); + } + + /// Returns true if device memory is allocated + bool device_backed() const { + return (device_.get() == nullptr) ? 
false : true; + } + + /// Returns the layout object + Layout layout() const { + return layout_; + } + + /// Returns the layout object's stride vector + Stride stride() const { + return layout_.stride(); + } + + /// Returns the layout object's stride in a given physical dimension + Index stride(int dim) const { + return layout_.stride().at(dim); + } + + /// Computes the offset of an index from the origin of the tensor + LongIndex offset(TensorCoord const& coord) const { + return layout_(coord); + } + + /// Returns a reference to the element at the logical Coord in host memory + Reference at(TensorCoord const& coord) { + return host_data(offset(coord)); + } + + /// Returns a const reference to the element at the logical Coord in host memory + ConstReference at(TensorCoord const& coord) const { + return host_data(offset(coord)); + } + + /// Returns the extent of the tensor + TensorCoord extent() const { + return extent_; + } + + /// Returns the extent of the tensor + TensorCoord & extent() { + return extent_; + } + + /// Copies data from device to host + void sync_host() { + if (device_backed()) { + device_memory::copy_to_host( + host_data(), device_data(), imaginary_stride() * 2); + } + } + + /// Copies data from host to device + void sync_device() { + if (device_backed()) { + device_memory::copy_to_device( + device_data(), host_data(), imaginary_stride() * 2); + } + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_device_to_host( + Element const* ptr_device_real, ///< source device memory + Element const* ptr_device_imag, ///< source device memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
+ + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_to_host( + host_data(), ptr_device_real, count); + + device_memory::copy_to_host( + host_data_imag(), ptr_device_imag, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_device_to_device( + Element const* ptr_device_real, ///< source device memory + Element const* ptr_device_imag, ///< source device memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_device_to_device( + device_data(), ptr_device_real, count); + + device_memory::copy_device_to_device( + device_data_imag(), ptr_device_imag, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_host_to_device( + Element const* ptr_host_real, ///< source host memory + Element const* ptr_host_imag, ///< source host memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_to_device( + device_data(), ptr_host_real, count); + + device_memory::copy_to_device( + device_data_imag(), ptr_host_imag, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_in_host_to_host( + Element const* ptr_host_real, ///< source host memory + Element const* ptr_host_imag, ///< source host memory + LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
+ + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_host_to_host( + host_data(), ptr_host_real, count); + + device_memory::copy_host_to_host( + host_data_imag(), ptr_host_imag, count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_device_to_host( + Element * ptr_host_real, ///< source device memory + Element * ptr_host_imag, ///< source device memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_to_host( + ptr_host_real, device_data(), count); + + device_memory::copy_to_host( + ptr_host_imag, device_data_imag(), count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_device_to_device( + Element * ptr_device_real, ///< source device memory + Element * ptr_device_imag, ///< source device memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_device_to_device( + ptr_device_real, device_data(), count); + + device_memory::copy_device_to_device( + ptr_device_imag, device_data_imag(), count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_host_to_device( + Element * ptr_device_real, ///< source device memory + Element * ptr_device_imag, ///< source device memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
+ + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_to_device( + ptr_device_real, host_data(), count); + + device_memory::copy_to_device( + ptr_device_imag, host_data_imag(), count); + } + + /// Copy data from a caller-supplied device pointer into host memory. + void copy_out_host_to_host( + Element * ptr_host_real, ///< source host memory + Element * ptr_host_imag, ///< source host memory + LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. + + if (count < 0) { + count = capacity(); + } + else { + count = __NV_STD_MIN(capacity(), count); + } + + device_memory::copy_host_to_host( + ptr_host_real, host_data(), count); + + device_memory::copy_host_to_host( + ptr_host_imag, host_data_imag(), count); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_uncompress.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_uncompress.h new file mode 100644 index 0000000000000000000000000000000000000000..7028bf7d25fe5e6ca3942c18891b9f0dfe12e800 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/host_uncompress.h @@ -0,0 +1,157 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief uncompress sparse matrix from the host side +*/ +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/tensor_view.h" +#include "cutlass/util/tensor_view_io.h" +#include "cutlass/util/reference/host/gemm.h" + +namespace cutlass { + +// uncompress sparse tensor core A matrix +template +void uncompress(TensorRef uncompressed_tensor_a, + TensorRef tensor_a, + TensorRef tensor_e, int row, int col) { + // How many uncompressed data we can get with ElementE meta data + int DecompressedElementsPerElementE = + 256 / cutlass::sizeof_bits::value; + + // Process 4bit meta data a time + int step; + + // 1:2 or 2:4 or 4:8 + int a, b; + + if (cutlass::sizeof_bits::value == 4) { + step = 8; + a = 4; + b = 8; + } else if (cutlass::sizeof_bits::value == 8) { + step = 4; + a = 2; + b = 4; + } else if (cutlass::sizeof_bits::value == 16) { + step = 4; + a = 2; + b = 4; + } else if (cutlass::sizeof_bits::value == 32) { + step = 2; + a = 1; + b = 2; + } + + int ElementsPerE = (cutlass::sizeof_bits::value == 4) ? 
2 : 1; + + for (int r = 0; r < row; ++r) { + for (int c = 0; c < (col / DecompressedElementsPerElementE); ++c) { + + ElementE meta = tensor_e.at(MatrixCoord(r, c)); + + for (int i = 0; i < DecompressedElementsPerElementE; i += step) { + int e = (meta >> (i / step * 4)) & 0xf; + int idx0 = e & 0x3; + int idx1 = e >> 2; + + if (a == 1) idx0 = idx0 / 2; + + for (int ii = 0; ii < step; ii += ElementsPerE) { + int real_col = + c * DecompressedElementsPerElementE + i + ii; + int compressed_col = (real_col / b) * a; + + if (ii == (idx0 * ElementsPerE)) { + uncompressed_tensor_a.at(MatrixCoord(r, real_col)) = + tensor_a.at(MatrixCoord(r, compressed_col)); + if (ElementsPerE == 2) + uncompressed_tensor_a.at(MatrixCoord(r, real_col + 1)) = + tensor_a.at(MatrixCoord(r, compressed_col + 1)); + } else if ((ii == (idx1 * ElementsPerE)) && (a != 1)) { + uncompressed_tensor_a.at(MatrixCoord(r, real_col)) = + tensor_a.at(MatrixCoord(r, compressed_col + ElementsPerE)); + if (ElementsPerE == 2) + uncompressed_tensor_a.at(MatrixCoord(r, real_col + 1)) = + tensor_a.at( + MatrixCoord(r, compressed_col + ElementsPerE + 1)); + } else { + uncompressed_tensor_a.at(MatrixCoord(r, real_col)) = + ElementA(0); + if (ElementsPerE == 2) + uncompressed_tensor_a.at(MatrixCoord(r, real_col + 1)) = + ElementA(0); + } + } + } + } + } +} + +// uncompress ELL block sparse matrix +template +void uncompress_ell_block_sparse( + TensorRef uncompressed_tensor_a, + TensorRef tensor_a, + TensorRef ell_idx, + int rows, int cols, + int ell_num_cols, int ell_blocksize) { + + for (int r = 0; r < rows / ell_blocksize; ++r) { + for (int c = 0; c < ell_num_cols / ell_blocksize; ++c) { + + ElementE idx = ell_idx.at(MatrixCoord(r, c)); + + if (idx != -1) { + int row_begin = r * ell_blocksize; + int col_begin_real = idx * ell_blocksize; + int col_begin = c * ell_blocksize; + + for (int i = 0; i < ell_blocksize; ++i) { + for (int j = 0; j < ell_blocksize; ++j) { + uncompressed_tensor_a.at(MatrixCoord(row_begin + i, 
col_begin_real + j)) = + tensor_a.at( + MatrixCoord(row_begin + i, col_begin +j)); + } + } + } + } + } +} + +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/index_sequence.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/index_sequence.h new file mode 100644 index 0000000000000000000000000000000000000000..846e02ce13c53c66b3f4b320851bf97f21882e0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/index_sequence.h @@ -0,0 +1,38 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" + +// integer_sequence moved to cutlass/numeric_types.h + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/packed_stride.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/packed_stride.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b21582e0a807900fcd85503ab3f0b3c3c0998fd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/packed_stride.hpp @@ -0,0 +1,107 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Utilities for packing constructing canonical CuTe stride types for 3.x mainloop params. +*/ + +#pragma once + +#include "cute/stride.hpp" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Strides without batch mode + +template +cute::Stride> +make_cute_packed_stride(cute::Stride> s, cute::Shape shape_MKL) { + static_assert(std::is_integral_v, + "Stride must have an integral type so it can be set dynamically. 
Static strides not supported."); + auto s_copy = s; + cute::get<0>(s_copy) = static_cast(cute::get<1>(shape_MKL)); + return s_copy; +} + +template +cute::Stride, IntT> +make_cute_packed_stride(cute::Stride, IntT> s, cute::Shape shape_MKL) { + static_assert(std::is_integral_v, + "Stride must have an integral type so it can be set dynamically. Static strides not supported."); + auto s_copy = s; + cute::get<1>(s_copy) = static_cast(cute::get<0>(shape_MKL)); + return s_copy; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Strides with batch mode + +template +cute::Stride, int64_t> +make_cute_packed_stride(cute::Stride, int64_t> s, cute::Shape shape_MKL) { + static_assert(std::is_integral_v, + "Stride must have an integral type so it can be set dynamically. Static strides not supported."); + auto s_copy = s; + cute::get<0>(s_copy) = static_cast(cute::get<1>(shape_MKL)); + int batch_count = cute::get<2>(shape_MKL); + if (batch_count > 1) { + cute::get<2>(s_copy) = static_cast(cute::get<0>(shape_MKL) * cute::get<1>(shape_MKL)); + } + else { + cute::get<2>(s_copy) = static_cast(0); + } + return s_copy; +} + +template +cute::Stride, IntT, int64_t> +make_cute_packed_stride(cute::Stride, IntT, int64_t> s, cute::Shape shape_MKL) { + static_assert(std::is_integral_v, + "Stride must have an integral type so it can be set dynamically. 
Static strides not supported."); + auto s_copy = s; + cute::get<1>(s_copy) = static_cast(cute::get<0>(shape_MKL)); + int batch_count = cute::get<2>(shape_MKL); + if (batch_count > 1) { + cute::get<2>(s_copy) = static_cast(cute::get<0>(shape_MKL) * cute::get<1>(shape_MKL)); + } + else { + cute::get<2>(s_copy) = static_cast(0); + } + return s_copy; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/print_error.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/print_error.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4d84af8a657388fd7173531d16815a6c27db5569 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/print_error.hpp @@ -0,0 +1,290 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +// The computed infinity norm does not include +// any NaN column absolute-value sums. +struct matrix_inf_norm_result { + // Accumulate errors in double, as this is generally + // the highest precision that the examples use. + double inf_norm = 0.0; + bool found_nan = false; +}; + +// In theory, cute::Tensor, T> could be treated as a view type, +// and thus passed by value (as std::span or std::string_view would be). +// However, generic cute::Tensor are more like containers +// and thus are best passed by reference or const reference. +template +matrix_inf_norm_result +matrix_inf_norm(cute::Tensor const& host_matrix) +{ + using std::abs; + using error_type = decltype(std::declval().inf_norm); + + error_type inf_norm = 0.0; + bool found_nan = false; + + // Computing the infinity norm requires that we be able + // to treat the input as a matrix, with rows and columns. 
+ const int64_t num_rows = cute::size<0>(host_matrix); + const int64_t num_cols = cute::size<1>(host_matrix); + + for(int64_t i = 0; i < num_rows; ++i) { + error_type row_abs_sum = 0.0; + for(int64_t j = 0; j < num_cols; ++j) { + row_abs_sum += abs(host_matrix(i, j)); + } + if(std::isnan(row_abs_sum)) { + found_nan = true; + } else { + inf_norm = row_abs_sum > inf_norm ? row_abs_sum : inf_norm; + } + } + + return {inf_norm, found_nan}; +} + +// Infinity norm of (X - Y). +template +matrix_inf_norm_result +matrix_diff_inf_norm(cute::Tensor const& X, + cute::Tensor const& Y) +{ + using std::abs; + using error_type = decltype(std::declval().inf_norm); + + assert(cute::size<0>(X) == cute::size<0>(Y)); + assert(cute::size<1>(X) == cute::size<1>(Y)); + + // Computing the infinity norm requires that we be able + // to treat the input as a matrix, with rows and columns. + const int64_t num_rows = cute::size<0>(X); + const int64_t num_cols = cute::size<1>(X); + + error_type inf_norm = 0.0; + bool found_nan = false; + + for(int64_t i = 0; i < num_rows; ++i) { + error_type row_abs_sum = 0.0; + for(int64_t j = 0; j < num_cols; ++j) { + row_abs_sum += abs(X(i,j) - Y(i,j)); + } + if(std::isnan(row_abs_sum)) { + found_nan = true; + } else { + inf_norm = row_abs_sum > inf_norm ? row_abs_sum : inf_norm; + } + } + + return {inf_norm, found_nan}; +} + +template +void +print_matrix_multiply_mollified_relative_error( + char const A_value_type_name[], + cute::Tensor const& A, + char const B_value_type_name[], + cute::Tensor const& B, + char const C_value_type_name[], + cute::Tensor const& C, + cute::Tensor const& C_ref) +{ + const auto [A_norm, A_has_nan] = matrix_inf_norm(A); + const auto [B_norm, B_has_nan] = matrix_inf_norm(B); + const auto [C_norm, C_has_nan] = matrix_inf_norm(C_ref); + const auto [diff_norm, diff_has_nan] = matrix_diff_inf_norm(C, C_ref); + + const auto A_norm_times_B_norm = A_norm * B_norm; + const auto relative_error = A_norm_times_B_norm == 0.0 ? 
+ diff_norm : (diff_norm / A_norm_times_B_norm); + + // For expected error bounds, please refer to the LAPACK Users' Guide, + // in particular https://netlib.org/lapack/lug/node108.html . + // Printing the infinity norm of C is a way to check + // that both the function being tested (C) + // and the reference implementation (C_ref) + // don't just do nothing (or fill with zeros). + using std::cout; + using cute::shape; + cout << "Matrix A: " << shape<0>(A) << "x" << shape<1>(A) << " of " << A_value_type_name << '\n' + << "Matrix B: " << shape<0>(B) << "x" << shape<1>(B) << " of " << B_value_type_name << '\n' + << "Matrix C: " << shape<0>(C) << "x" << shape<1>(C) << " of " << C_value_type_name << '\n' + << std::scientific + << "Infinity norm of A: " << A_norm << '\n' + << "Infinity norm of B: " << B_norm << '\n' + << "Infinity norm of C: " << C_norm << '\n' + << "Infinity norm of (C - C_ref): " << diff_norm << '\n'; + + if(A_norm_times_B_norm == 0.0) { + cout << "Mollified relative error: " << relative_error << '\n'; + } else { + cout << "Relative error: " << relative_error << '\n'; + } + + if (A_has_nan || B_has_nan || C_has_nan || diff_has_nan) { + cout << "Did we encounter NaN in A? " << (A_has_nan ? "yes" : "no") << '\n' + << "Did we encounter NaN in B? " << (B_has_nan ? "yes" : "no") << '\n' + << "Did we encounter NaN in C? " << (C_has_nan ? "yes" : "no") << '\n' + << "Did we encounter NaN in (C - C_ref)? " << (diff_has_nan ? "yes" : "no") << '\n'; + } +} + +template +void +print_matrix_multiply_mollified_relative_error( + const char value_type_name[], + const cute::Tensor& A, + const cute::Tensor& B, + const cute::Tensor& C_computed, + const cute::Tensor& C_expected) +{ + print_matrix_multiply_mollified_relative_error(value_type_name, A, value_type_name, B, + value_type_name, C_computed, C_expected); +} + +// Take a CUTLASS HostTensor (or the like) as input, +// and return a const CuTe Tensor. +// This is useful for use with the above error printing functions. 
+// This implicitly "transposes" if the layout is RowMajor. +// Note that the HostTensor must be captured by nonconst reference +// in order for X.host_ref().data() to compile. +// (CUTLASS is a bit more container-y than CuTe.) +template +auto host_matrix_to_const_cute_tensor(CutlassHostTensorType& X) +{ + // The tensors were created with post-transposed extents. + const auto extents = X.extent(); + const auto shape = cute::Shape{extents[0], extents[1]}; + // Both RowMajor and ColumnMajor only store one stride. + const int LDX = X.stride(0); + const auto strides = [&]() { + using input_layout_type = typename std::decay_t::Layout; + if constexpr (std::is_same_v) { + return cute::Stride{1, LDX}; + } + else { + static_assert(std::is_same_v); + return cute::Stride{LDX, 1}; + } + }(); + const auto layout = cute::make_layout(shape, strides); + auto X_data = X.host_ref().data(); + auto X_data_const = const_cast >(X_data); + return cute::make_tensor(X_data_const, layout); +}; + + +// Returns EXIT_SUCCESS if the 2-norm relative error is exactly zero, else returns EXIT_FAILURE. +// This makes the return value suitable as the return value of main(). 
+template +int +print_relative_error( + std::size_t n, + T1 const& data, + T2 const& reference, + bool print_verbose = false, + bool print_error = true) { + using std::abs; using std::sqrt; + + // Use either double or complex for error computation + using value_type = cute::remove_cvref_t; + using error_type = std::conditional_t::value, + cute::complex, + double>; + + if (print_verbose) { + std::cout << "Idx:\t"<< "Val\t" << "RefVal\t" << "RelError" << std::endl; + } + + double eps = 1e-200; + + double tot_error_sq = 0; + double tot_norm_sq = 0; + double tot_ind_rel_err = 0; + double max_ind_rel_err = 0; + for (std::size_t i = 0; i < n; ++i) + { + error_type val = data[i]; + error_type ref = reference[i]; + + double aref = abs(ref); + double diff = abs(ref - val); + double rel_error = diff / (aref + eps); + + // Individual relative error + tot_ind_rel_err += rel_error; + + // Maximum relative error + max_ind_rel_err = std::max(max_ind_rel_err, rel_error); + + // Total relative error + tot_error_sq += diff * diff; + tot_norm_sq += aref * aref; + + if (print_verbose) { + std::cout << i << ":\t" << val << "\t" << ref << "\t" << rel_error << std::endl; + } + } + + printf("Vector reference norm: [%.5e]\n", sqrt(tot_norm_sq)); + + double tot_rel_err = sqrt(tot_error_sq/(tot_norm_sq+eps)); + if (print_error) + printf("Vector relative error: [%.5e]\n", tot_rel_err); + + double ave_rel_err = tot_ind_rel_err / double(n); + if (print_error) + printf("Average relative error: [%.5e]\n", ave_rel_err); + + if (print_error) + printf("Maximum relative error: [%.5e]\n", max_ind_rel_err); + + return (tot_rel_err == 0.0) ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/inner_product.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/inner_product.h new file mode 100644 index 0000000000000000000000000000000000000000..b4bffa3889a0a745038ed251fa8a752efd924e4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/inner_product.h @@ -0,0 +1,135 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GEMM in host-side code. +*/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" + +namespace cutlass { +namespace reference { +namespace detail { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Template function to compute an inner product. 
+#pragma hd_warning_disable // Suppresses warnings when attempting to instantiate with a + // host-only type +template +CUTLASS_HOST_DEVICE +Ctype inner_product(Atype a, Btype b, Ctype c) { + return Ctype(a) * Ctype(b) + c; +} + +/// Specialization for matrix multiplication with binary operands +template <> +CUTLASS_HOST_DEVICE +int inner_product, Array, int>( + Array a, + Array b, + int c) { + + int accum = 0; + for (int bit = 0; bit < 32; bit++) { + accum += a[bit] ^ b[bit]; + } + return accum + c; +} + +/* +/// Specialization for matrix multiplication with signed 4-bit integer operands +template <> +CUTLASS_HOST_DEVICE +int inner_product, Array, int>( + Array a, + Array b, + int c) { + + int accum = 0; + for (int k = 0; k < 8; k++) { + accum += a[k] * b[k]; + } + return accum + c; +} + +/// Specialization for matrix multiplication with unsigned 4-bit integer operands +template <> +CUTLASS_HOST_DEVICE +int inner_product, Array, int>( + Array a, + Array b, + int c) { + + int accum = 0; + for (int k = 0; k < 8; k++) { + accum += a[k] * b[k]; + } + return accum + c; +} +*/ + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct Cast { + // Default behavior: convert to the destination type +#pragma hd_warning_disable // Suppresses warnings when attempting to instantiate complex with a + // host-only type + CUTLASS_HOST_DEVICE + static DstType apply(SrcType src) { return static_cast(src); }; +}; + +template <> +struct Cast { + CUTLASS_HOST_DEVICE + static int8_t apply(float src) { + // Clamp to the range of signed 8-bit integers. + return static_cast(fmaxf(-128.f, fminf(127.f, src))); + }; +}; + +template <> +struct Cast { + CUTLASS_HOST_DEVICE + static uint8_t apply(float src) { + // Clamp to the range of signed 8-bit integers. 
+ return static_cast(fmaxf(0.f, fminf(255.f, src))); + }; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace detail +} // namespace reference +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/linear_to_coordinate.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/linear_to_coordinate.h new file mode 100644 index 0000000000000000000000000000000000000000..ac2269999bc52ab29147a1b11212db647fef5935 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/linear_to_coordinate.h @@ -0,0 +1,94 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GEMM in host-side code. +*/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace detail { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct LinearToCoordinateHelper { + + CUTLASS_HOST_DEVICE + void operator()(Coord &coord, int64_t idx, Coord const &extent) const { + + int64_t prod = 1; + + CUTLASS_PRAGMA_UNROLL + for (int i = Rank - Index; i < Rank; ++i) { + prod *= int64_t(extent[i]); + } + + coord[Rank - Index - 1] = int(idx / prod); + + int64_t residual = idx % prod; + LinearToCoordinateHelper()(coord, residual, extent); + } +}; + +template +struct LinearToCoordinateHelper { + + CUTLASS_HOST_DEVICE + void operator()(Coord &coord, int64_t idx, Coord const &extent) const { + coord[Rank - 1] = int(idx); + } +}; + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct LinearToCoordinate { + + CUTLASS_HOST_DEVICE + void operator()(Coord &coord, int64_t idx, Coord const &extent) const { + LinearToCoordinateHelper()(coord, idx, extent); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace detail +} // namespace reference +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/convolution.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..fec058724efe3547678052a7f23505e330d9f297 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/convolution.h @@ -0,0 +1,1549 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Reference implementation for convolution in device-side code. 
+*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/functional.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/conv/convolution.h" +#include "cutlass/conv/conv2d_problem_size.h" +#include "cutlass/conv/conv3d_problem_size.h" + +namespace cutlass { +namespace reference { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// Conv2d device reference kernel +//////////////////////////////////////////////////////////////////////////////////////////////////// + +// Conv2d Fprop kernel - y = fprop(x, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension + int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension + int kCtaShapeM = 16, // shape of a threadblock in units of threads + int kCtaShapeN = 8 // shape of a threadblock in units of threads +> +__global__ void Conv2dFprop( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_x, + TensorRef tensor_w, + TensorRef tensor_y_in, + TensorRef tensor_y_out, + ElementCompute alpha, + ElementCompute beta + ) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + ElementAccumulator element_A[kThreadM]; + ElementAccumulator element_B[kThreadN]; + ElementAccumulator accum[kThreadM][kThreadN]; + + int64_t npq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM; + int k_start = blockIdx.y * kCtaShapeN * 
kThreadN + threadIdx.y * kThreadN; + + int thread_n[kThreadM]; + int thread_p[kThreadM]; + int thread_q[kThreadM]; + + // Compute N, P, Q coordinates for each row of a thread's tile + int64_t PQ = int64_t(problem_size.P) * problem_size.Q; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + int64_t npq = npq_start + m; + + thread_n[m] = int(npq / PQ); + + int64_t residual = npq % PQ; + thread_p[m] = int(residual / problem_size.Q); + thread_q[m] = int(residual % problem_size.Q); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = ElementAccumulator(); + } + } + + int c_per_group = problem_size.C / problem_size.groups; + int k_per_group = problem_size.K / problem_size.groups; + + // Compute convolution + for (int R = 0; R < problem_size.R; ++R) { + for (int S = 0; S < problem_size.S; ++S) { + for (int C = 0; C < problem_size.C; ++C) { + + // Get group id of currnet channel + int c_group_idx = C / c_per_group; + + // Load from activations tensor + int filter_r = R; + int filter_s = S; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_r = problem_size.R - 1 - R; + filter_s = problem_size.S - 1 - S; + } + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h; + int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w; + + if (thread_n[m] < problem_size.N && h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) { + element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], h, w, C})); + } + else { + element_A[m] = ElementAccumulator(); + } + } + + // Load from filters tensor + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_k = k_start + n; + int k_group_idx = thread_k / k_per_group; + + if (thread_k < problem_size.K && 
k_group_idx == c_group_idx) { + element_B[n] = ElementAccumulator(tensor_w.at({thread_k, R, S, C % c_per_group})); + } + else { + element_B[n] = ElementAccumulator(); + } + } + + // Accumulate matrix product + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]); + } + } + } + } + } + + // Write out the results + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + if (thread_n[m] < problem_size.N && thread_p[m] < problem_size.P && thread_q[m] < problem_size.Q) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_k = k_start + n; + if (thread_k < problem_size.K) { + + ElementCompute c_ref = ElementCompute(); + if (beta != ElementCompute()) { + c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_p[m], thread_q[m], thread_k})); + } + + tensor_y_out.at({thread_n[m], thread_p[m], thread_q[m], thread_k}) = convert_op( + alpha * ElementCompute(accum[m][n]) + beta * c_ref); + } + } + } + } +} + +// Conv3d Fprop kernel - y = fprop(x, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension + int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension + int kCtaShapeM = 16, // shape of a threadblock in units of threads + int kCtaShapeN = 8 // shape of a threadblock in units of threads +> +__global__ void Conv3dFprop( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_x, + TensorRef tensor_w, + TensorRef tensor_y_in, + TensorRef tensor_y_out, + ElementCompute alpha, + ElementCompute beta + ) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + 
ElementAccumulator element_A[kThreadM]; + ElementAccumulator element_B[kThreadN]; + ElementAccumulator accum[kThreadM][kThreadN]; + + int64_t nzpq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM; + int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN; + + int thread_n[kThreadM]; + int thread_z[kThreadM]; + int thread_p[kThreadM]; + int thread_q[kThreadM]; + + // Compute N, Z, P, Q coordinates for each row of a thread's tile + int64_t PQ = int64_t(problem_size.P) * problem_size.Q; + int64_t ZPQ = PQ * problem_size.Z; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + int64_t nzpq = nzpq_start + m; + + thread_n[m] = int(nzpq / ZPQ); + + int64_t residual = nzpq % ZPQ; + thread_z[m] = int(residual / PQ); + + residual = residual % PQ; + thread_p[m] = int(residual / problem_size.Q); + thread_q[m] = int(residual % problem_size.Q); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = ElementAccumulator(); + } + } + + // Compute convolution + for (int T = 0; T < problem_size.T; ++T) { + for (int R = 0; R < problem_size.R; ++R) { + for (int S = 0; S < problem_size.S; ++S) { + for (int C = 0; C < problem_size.C; ++C) { + + // Load from activations tensor + int filter_t = T; + int filter_r = R; + int filter_s = S; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_t = problem_size.T - 1 - T; + filter_r = problem_size.R - 1 - R; + filter_s = problem_size.S - 1 - S; + } + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + int d = thread_z[m] * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d; + int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h; + int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w; + + if (thread_n[m] < 
problem_size.N && + d >= 0 && d < problem_size.D && + h >= 0 && h < problem_size.H && + w >= 0 && w < problem_size.W) { + + element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], d, h, w, C})); + } + else { + element_A[m] = ElementAccumulator(); + } + } + + // Load from filters tensor + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_k = k_start + n; + + if (thread_k < problem_size.K) { + element_B[n] = ElementAccumulator(tensor_w.at({thread_k, T, R, S, C})); + } + else { + element_B[n] = ElementAccumulator(); + } + } + + // Accumulate matrix product + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]); + } + } + + } // for (C) + } // for (S) + } // for (R) + } // for (T) + + // Write out the results + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + if (thread_n[m] < problem_size.N && + thread_z[m] < problem_size.Z && + thread_p[m] < problem_size.P && + thread_q[m] < problem_size.Q) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_k = k_start + n; + if (thread_k < problem_size.K) { + + ElementCompute c_ref = ElementCompute(); + if (beta != ElementCompute()) { + c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k})); + } + + tensor_y_out.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}) = convert_op( + alpha * ElementCompute(accum[m][n]) + beta * c_ref); + } + } // for (n) + + } + } // for (m) +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Conv2d dgrad kernel - dx = dgrad(dy, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = 
NumericConverter, + typename InnerProductOp = multiply_add, + int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension + int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension + int kCtaShapeM = 16, // shape of a threadblock in units of threads + int kCtaShapeN = 8 // shape of a threadblock in units of threads +> +__global__ void Conv2dDgrad( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_w, + TensorRef tensor_dx_in, + TensorRef tensor_dx_out, + ElementCompute alpha, + ElementCompute beta + ) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + ElementAccumulator element_A[kThreadM]; + ElementAccumulator element_B[kThreadN]; + ElementAccumulator accum[kThreadM][kThreadN]; + + int64_t nhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM; + int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN; + + int thread_n[kThreadM]; + int thread_h[kThreadM]; + int thread_w[kThreadM]; + + // Compute N, H, W coordinates for each row of a thread's tile + int64_t HW = int64_t(problem_size.H) * problem_size.W; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + int64_t nhw = nhw_start + m; + + thread_n[m] = int(nhw / HW); + + int64_t residual = nhw % HW; + thread_h[m] = int(residual / problem_size.W); + thread_w[m] = int(residual % problem_size.W); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = ElementAccumulator(); + } + } + + // Compute convolution + for (int R = 0; R < problem_size.R; ++R) { + for (int S = 0; S < problem_size.S; ++S) { + for (int K = 0; K < problem_size.K; ++K) { + + // Load from activations tensor + int filter_r = R; + int filter_s = S; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_r = problem_size.R - 1 - R; + filter_s = problem_size.S - 1 - S; + } + + 
CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h; + int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w; + + element_A[m] = ElementAccumulator(); + + if (p >= 0 && !(p % problem_size.stride_h) && q >= 0 && !(q % problem_size.stride_w)) { + + p = p / problem_size.stride_h; + q = q / problem_size.stride_w; + + if (thread_n[m] < problem_size.N && p < problem_size.P && q < problem_size.Q) { + element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], p, q, K})); + } + } + } + + // Load from filters tensor + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_c = c_start + n; + + if (thread_c < problem_size.C) { + element_B[n] = ElementAccumulator(tensor_w.at({K, R, S, thread_c})); + } + else { + element_B[n] = ElementAccumulator(); + } + } + + // Accumulate matrix product + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]); + } + } + } + } + } + + // Write out the results + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + if (thread_n[m] < problem_size.N && thread_h[m] < problem_size.H && thread_w[m] < problem_size.W) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_c = c_start + n; + if (thread_c < problem_size.C) { + + ElementCompute c_ref = ElementCompute(); + if (beta != ElementCompute()) { + c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_h[m], thread_w[m], thread_c})); + } + + tensor_dx_out.at({thread_n[m], thread_h[m], thread_w[m], thread_c}) = convert_op( + alpha * ElementCompute(accum[m][n]) + beta * c_ref); + } + } + } + } +} + +// Conv3d dgrad kernel - dx = dgrad(dy, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename 
ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension + int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension + int kCtaShapeM = 16, // shape of a threadblock in units of threads + int kCtaShapeN = 8 // shape of a threadblock in units of threads +> +__global__ void Conv3dDgrad( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_w, + TensorRef tensor_dx_in, + TensorRef tensor_dx_out, + ElementCompute alpha, + ElementCompute beta + ) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + ElementAccumulator element_A[kThreadM]; + ElementAccumulator element_B[kThreadN]; + ElementAccumulator accum[kThreadM][kThreadN]; + + int64_t ndhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM; + int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN; + + int thread_n[kThreadM]; + int thread_d[kThreadM]; + int thread_h[kThreadM]; + int thread_w[kThreadM]; + + // Compute N, H, W coordinates for each row of a thread's tile + int64_t HW = int64_t(problem_size.H) * problem_size.W; + int64_t DHW = HW * problem_size.D; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + int64_t ndhw = ndhw_start + m; + + thread_n[m] = int(ndhw / DHW); + + int64_t residual = ndhw % DHW; + thread_d[m] = int(residual / HW); + + residual = residual % HW; + thread_h[m] = int(residual / problem_size.W); + thread_w[m] = int(residual % problem_size.W); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = ElementAccumulator(); + } + } + + // Compute convolution + for (int T = 0; T < problem_size.T; ++T) { + for (int R = 0; R < problem_size.R; ++R) { + for (int S = 0; S < problem_size.S; ++S) { + for (int K = 0; K 
< problem_size.K; ++K) { + + // Load from activations tensor + int filter_t = T; + int filter_r = R; + int filter_s = S; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_t = problem_size.T - 1 - T; + filter_r = problem_size.R - 1 - R; + filter_s = problem_size.S - 1 - S; + } + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + int z = thread_d[m] + problem_size.pad_d - filter_t * problem_size.dilation_d; + int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h; + int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w; + + element_A[m] = ElementAccumulator(); + + if (z >= 0 && !(z % problem_size.stride_d) && + p >= 0 && !(p % problem_size.stride_h) && + q >= 0 && !(q % problem_size.stride_w)) { + + z = z / problem_size.stride_d; + p = p / problem_size.stride_h; + q = q / problem_size.stride_w; + + if (thread_n[m] < problem_size.N && z < problem_size.Z && p < problem_size.P && q < problem_size.Q) { + element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], z, p, q, K})); + } + } + } + + // Load from filters tensor + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int thread_c = c_start + n; + + if (thread_c < problem_size.C) { + element_B[n] = ElementAccumulator(tensor_w.at({K, T, R, S, thread_c})); + } + else { + element_B[n] = ElementAccumulator(); + } + } + + // Accumulate matrix product + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]); + } + } + + } // for (C) + } // for (S) + } // for (R) + } // for (T) + + // Write out the results + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + + if (thread_n[m] < problem_size.N && + thread_d[m] < problem_size.D && + thread_h[m] < problem_size.H && + thread_w[m] < problem_size.W) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + int 
thread_c = c_start + n; + if (thread_c < problem_size.C) { + + ElementCompute c_ref = ElementCompute(); + if (beta != ElementCompute()) { + c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c})); + } + + tensor_dx_out.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}) = convert_op( + alpha * ElementCompute(accum[m][n]) + beta * c_ref); + } + } + } + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Conv2d wgrad kernel - dw = wgrad(dy, x) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension + int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension + int kCtaShapeM = 8, // shape of a threadblock in units of threads + int kCtaShapeN = 16 // shape of a threadblock in units of threads +> +__global__ void Conv2dWgrad( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_x, + TensorRef tensor_dw_in, + TensorRef tensor_dw_out, + ElementCompute alpha, + ElementCompute beta + ) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + ElementAccumulator element_A[kThreadM]; + ElementAccumulator element_B[kThreadN]; + ElementAccumulator accum[kThreadM][kThreadN]; + + int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM; + int64_t rsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN; + + int thread_r[kThreadN]; + int thread_s[kThreadN]; + int thread_c[kThreadN]; + + // Compute R, S, C coordinates for each row of a thread's tile + int64_t SC = int64_t(problem_size.S) * problem_size.C; + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) 
{ + + int64_t rsc = rsc_start + n; + int64_t residual = rsc % SC; + + thread_r[n] = int(rsc / SC); + thread_s[n] = int(residual / problem_size.C); + thread_c[n] = int(residual % problem_size.C); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = ElementAccumulator(); + } + } + + // Compute convolution + for (int N = 0; N < problem_size.N; ++N) { + for (int P = 0; P < problem_size.P; ++P) { + for (int Q = 0; Q < problem_size.Q; ++Q) { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + int thread_k = k_start + m; + + element_A[m] = ElementAccumulator(); + + if (thread_k < problem_size.K) { + element_A[m] = ElementAccumulator(tensor_dy.at({N, P, Q, thread_k})); + } + } + + // Load from filters tensor + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + + // Load from activations tensor + int filter_r = thread_r[n]; + int filter_s = thread_s[n]; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_r = problem_size.R - 1 - filter_r; + filter_s = problem_size.S - 1 - filter_s; + } + + int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h; + int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w; + + element_B[n] = ElementAccumulator(); + + if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W && thread_c[n] < problem_size.C) { + element_B[n] = ElementAccumulator(tensor_x.at({N, h, w, thread_c[n]})); + } + } + + // Accumulate matrix product + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]); + } + } + } + } + } + + // Write out the results + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + int thread_k = k_start + m; + + if (thread_k < problem_size.K) { + + 
CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + + if (thread_r[n] < problem_size.R && thread_s[n] < problem_size.S && thread_c[n] < problem_size.C) { + + ElementCompute c_ref = ElementCompute(); + + if (beta != ElementCompute()) { + c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_r[n], thread_s[n], thread_c[n]})); + } + + tensor_dw_out.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}) = convert_op( + alpha * ElementCompute(accum[m][n]) + beta * c_ref); + } + } + } + } +} + +// Conv3d wgrad kernel - dw = wgrad(dy, x) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension + int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension + int kCtaShapeM = 8, // shape of a threadblock in units of threads + int kCtaShapeN = 16 // shape of a threadblock in units of threads +> +__global__ void Conv3dWgrad( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_x, + TensorRef tensor_dw_in, + TensorRef tensor_dw_out, + ElementCompute alpha, + ElementCompute beta + ) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + ElementAccumulator element_A[kThreadM]; + ElementAccumulator element_B[kThreadN]; + ElementAccumulator accum[kThreadM][kThreadN]; + + int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM; + int64_t trsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN; + + int thread_t[kThreadN]; + int thread_r[kThreadN]; + int thread_s[kThreadN]; + int thread_c[kThreadN]; + + // Compute R, S, C coordinates for each row of a thread's tile + int64_t SC = int64_t(problem_size.S) * problem_size.C; + int64_t RSC = SC * problem_size.R; + + 
CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + + int64_t trsc = trsc_start + n; + + thread_t[n] = int(trsc / RSC); + + int64_t residual = trsc % RSC; + thread_r[n] = int(residual / SC); + + residual = residual % SC; + thread_s[n] = int(residual / problem_size.C); + thread_c[n] = int(residual % problem_size.C); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = ElementAccumulator(); + } + } + + // Compute convolution + for (int N = 0; N < problem_size.N; ++N) { + for (int Z = 0; Z < problem_size.Z; ++Z) { + for (int P = 0; P < problem_size.P; ++P) { + for (int Q = 0; Q < problem_size.Q; ++Q) { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + int thread_k = k_start + m; + + element_A[m] = ElementAccumulator(); + + if (thread_k < problem_size.K) { + element_A[m] = ElementAccumulator(tensor_dy.at({N, Z, P, Q, thread_k})); + } + } + + // Load from filters tensor + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + + // Load from activations tensor + int filter_t = thread_t[n]; + int filter_r = thread_r[n]; + int filter_s = thread_s[n]; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_t = problem_size.T - 1 - filter_t; + filter_r = problem_size.R - 1 - filter_r; + filter_s = problem_size.S - 1 - filter_s; + } + + int d = Z * problem_size.stride_d - problem_size.pad_w + filter_t * problem_size.dilation_d; + int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h; + int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w; + + element_B[n] = ElementAccumulator(); + + if (d >= 0 && d < problem_size.D && + h >= 0 && h < problem_size.H && + w >= 0 && w < problem_size.W && + thread_c[n] < problem_size.C) { + + element_B[n] = ElementAccumulator(tensor_x.at({N, d, h, w, thread_c[n]})); + } + } + + // Accumulate matrix 
product + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]); + } + } + + } // for (Q) + } // for (P) + } // for (Z) + } // for (N) + + // Write out the results + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kThreadM; ++m) { + int thread_k = k_start + m; + + if (thread_k < problem_size.K) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kThreadN; ++n) { + + if (thread_t[n] < problem_size.T && + thread_r[n] < problem_size.R && + thread_s[n] < problem_size.S && + thread_c[n] < problem_size.C) { + + ElementCompute c_ref = ElementCompute(); + + if (beta != ElementCompute()) { + c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]})); + } + + tensor_dw_out.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}) = convert_op( + alpha * ElementCompute(accum[m][n]) + beta * c_ref); + } + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Conv2d Fprop dispatcher - y = fprop(x, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv2dFprop( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_x, + TensorRef tensor_w, + TensorRef tensor_y_in, + TensorRef tensor_y_out, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + // + // Blocking factors improve performance of reference implementation + // + + int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension + int const 
kThreadN = 4; // shape of a thread's tile in the GEMM N dimension + int const kCtaShapeM = 16; // shape of a threadblock in units of threads + int const kCtaShapeN = 8; // shape of a threadblock in units of threads + + int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q; + int64_t blocks_m = (npq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM); + + dim3 block(kCtaShapeM, kCtaShapeN); + dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN)); + + kernel::Conv2dFprop< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp, + kThreadM, + kThreadN, + kCtaShapeM, + kCtaShapeN + ><<< grid, block, 0, stream >>>( + problem_size, + tensor_x, + tensor_w, + tensor_y_in, + tensor_y_out, + alpha, + beta + ); + + cudaError_t result = cudaPeekAtLastError(); + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + + return Status::kSuccess; +} + +/// Conv3d Fprop dispatcher - y = fprop(x, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv3dFprop( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_x, + TensorRef tensor_w, + TensorRef tensor_y_in, + TensorRef tensor_y_out, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + // + // Blocking factors improve performance of reference implementation + // + + int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension + int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension + int const kCtaShapeM = 16; // shape of a threadblock in units of threads + int const kCtaShapeN = 8; // shape of a threadblock in units of threads + + int64_t 
nzpq = int64_t(problem_size.N) * problem_size.Z * problem_size.P * problem_size.Q; + int64_t blocks_m = (nzpq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM); + + dim3 block(kCtaShapeM, kCtaShapeN); + dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN)); + + kernel::Conv3dFprop< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp, + kThreadM, + kThreadN, + kCtaShapeM, + kCtaShapeN + ><<< grid, block, 0, stream >>>( + problem_size, + tensor_x, + tensor_w, + tensor_y_in, + tensor_y_out, + alpha, + beta + ); + + cudaError_t result = cudaPeekAtLastError(); + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + + return Status::kSuccess; +} + +/// Conv2d Dgrad dispatcher - dx = dgrad(dy, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv2dDgrad( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_w, + TensorRef tensor_dx_in, + TensorRef tensor_dx_out, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + // + // Blocking factors improve performance of reference implementation + // + + int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension + int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension + int const kCtaShapeM = 16; // shape of a threadblock in units of threads + int const kCtaShapeN = 8; // shape of a threadblock in units of threads + + int64_t nhw = int64_t(problem_size.N) * problem_size.H * problem_size.W; + int64_t blocks_m = (nhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM); + + dim3 block(kCtaShapeM, kCtaShapeN); + dim3 
grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN)); + + kernel::Conv2dDgrad< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp, + kThreadM, + kThreadN, + kCtaShapeM, + kCtaShapeN + ><<< grid, block, 0, stream >>>( + problem_size, + tensor_dy, + tensor_w, + tensor_dx_in, + tensor_dx_out, + alpha, + beta + ); + + cudaError_t result = cudaPeekAtLastError(); + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + + return Status::kSuccess; +} + +/// Conv3d Dgrad dispatcher - dx = dgrad(dy, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv3dDgrad( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_w, + TensorRef tensor_dx_in, + TensorRef tensor_dx_out, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + // + // Blocking factors improve performance of reference implementation + // + + int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension + int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension + int const kCtaShapeM = 16; // shape of a threadblock in units of threads + int const kCtaShapeN = 8; // shape of a threadblock in units of threads + + int64_t ndhw = int64_t(problem_size.N) * problem_size.D * problem_size.H * problem_size.W; + int64_t blocks_m = (ndhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM); + + dim3 block(kCtaShapeM, kCtaShapeN); + dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN)); + + kernel::Conv3dDgrad< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + 
ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp, + kThreadM, + kThreadN, + kCtaShapeM, + kCtaShapeN + ><<< grid, block, 0, stream >>>( + problem_size, + tensor_dy, + tensor_w, + tensor_dx_in, + tensor_dx_out, + alpha, + beta + ); + + cudaError_t result = cudaPeekAtLastError(); + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + + return Status::kSuccess; +} + +/// Conv2d Wgrad dispatcher - dw = wgrad(dy, x) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv2dWgrad( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_x, + TensorRef tensor_dw_in, + TensorRef tensor_dw_out, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + // + // Blocking factors improve performance of reference implementation + // + + int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension + int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension + int const kCtaShapeM = 8; // shape of a threadblock in units of threads + int const kCtaShapeN = 16; // shape of a threadblock in units of threads + + int64_t rsc = int64_t(problem_size.R) * problem_size.S * problem_size.C; + int64_t blocks_n = (rsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN); + + dim3 block(kCtaShapeM, kCtaShapeN); + dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n)); + + kernel::Conv2dWgrad< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp, + kThreadM, + kThreadN, + kCtaShapeM, + kCtaShapeN + ><<< grid, block, 0, stream >>>( + problem_size, + tensor_dy, + tensor_x, + tensor_dw_in, + 
tensor_dw_out, + alpha, + beta + ); + + cudaError_t result = cudaPeekAtLastError(); + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + + return Status::kSuccess; +} + +/// Conv3d Wgrad dispatcher - dw = wgrad(dy, x) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv3dWgrad( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_x, + TensorRef tensor_dw_in, + TensorRef tensor_dw_out, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + // + // Blocking factors improve performance of reference implementation + // + + int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension + int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension + int const kCtaShapeM = 8; // shape of a threadblock in units of threads + int const kCtaShapeN = 16; // shape of a threadblock in units of threads + + int64_t trsc = int64_t(problem_size.T) * problem_size.R * problem_size.S * problem_size.C; + int64_t blocks_n = (trsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN); + + dim3 block(kCtaShapeM, kCtaShapeN); + dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n)); + + kernel::Conv3dWgrad< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, + InnerProductOp, + kThreadM, + kThreadN, + kCtaShapeM, + kCtaShapeN + ><<< grid, block, 0, stream >>>( + problem_size, + tensor_dy, + tensor_x, + tensor_dw_in, + tensor_dw_out, + alpha, + beta + ); + + cudaError_t result = cudaPeekAtLastError(); + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + + return Status::kSuccess; +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv2d( + conv::Operator convolutional_operator, + conv::Conv2dProblemSize problem_size, + TensorRef tensor_A, + TensorRef tensor_B, + TensorRef tensor_C, + TensorRef tensor_D, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + switch (convolutional_operator) { + case conv::Operator::kFprop: + return Conv2dFprop< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream); + break; + + case conv::Operator::kDgrad: + return Conv2dDgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream); + break; + + case conv::Operator::kWgrad: + return Conv2dWgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream); + break; + + default: break; + } + + return Status::kErrorNotSupported; +} + +/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +Status Conv3d( + conv::Operator convolutional_operator, + conv::Conv3dProblemSize problem_size, + TensorRef tensor_A, + TensorRef tensor_B, + TensorRef tensor_C, + TensorRef tensor_D, + ElementCompute alpha, + ElementCompute beta, + cudaStream_t stream = nullptr) { + + switch (convolutional_operator) { + case conv::Operator::kFprop: + return Conv3dFprop< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream); + + case conv::Operator::kDgrad: + return Conv3dDgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream); + + case conv::Operator::kWgrad: + return Conv3dWgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream); + + default: break; + } + + return Status::kErrorNotSupported; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm.h new 
file mode 100644 index 0000000000000000000000000000000000000000..a083bd14c330029aef6fe91f2f86da4d475c6bb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm.h @@ -0,0 +1,385 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GEMM in device-side code. +*/ + +#pragma once + +#include "cutlass/coord.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/util/reference/device/kernel/gemm.h" + +namespace cutlass { +namespace reference { +namespace device { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename AccumulatorType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_gemm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + AccumulatorType initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + // Blocking structure potentially improves performance of reference implementation + // with a minor increase in complexity. + // + // Note, this reference implementation is NOT expected to approach peak performance. + using OutputTile = MatrixShape<4, 4>; + + dim3 block(16, 8); + + dim3 grid( + (problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow), + (problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn) + ); + + // Launch a GEMM kernel + kernel::Gemm< + TensorRef, + TensorRef, + TensorRef, + ScalarType, + AccumulatorType, + OutputTile, + InnerProductOp, + ConvertOp + ><<< grid, block >>>( + problem_size, + alpha, + tensor_a, + tensor_b, + beta, + tensor_c, + tensor_d, + initial_accum + ); +} +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename AccumulatorType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_gemm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + AccumulatorType initial_accum) { + + compute_gemm( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c, + initial_accum); +} + +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename AccumulatorType, + typename InnerProductOp = cutlass::arch::OpMultiplyAdd +> +struct Gemm; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + AccumulatorType initial_accum = AccumulatorType(0)) { + + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + AccumulatorType initial_accum = AccumulatorType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + 
+/// Partial specialization for multiply-add-saturate +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + AccumulatorType initial_accum = AccumulatorType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm, + NumericConverterClamp>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + AccumulatorType initial_accum = AccumulatorType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm, + NumericConverterClamp>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for XOR-popc +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + AccumulatorType initial_accum = AccumulatorType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + AccumulatorType initial_accum = AccumulatorType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, 
tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Batched GEMM +// +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a batch of GEMMs over a set of matrices of common dimension. +// +// TensorRefCollection* is a type satisfying the TensorRefCollection concept. +// +template < + typename TensorRefCollectionA, + typename TensorRefCollectionB, + typename TensorRefCollectionC, + typename ScalarType, + typename AccumulatorType, + typename InnerProductOp, + typename ConvertOp +> +void BatchedGemm( + gemm::GemmCoord problem_size, + int batch_count, + ScalarType alpha, + TensorRefCollectionA const& tensor_a, + TensorRefCollectionB const& tensor_b, + ScalarType beta, + TensorRefCollectionC &tensor_c, + AccumulatorType initial_accum) { + + static_assert( + TensorRefCollectionA::kRank == 2 && + TensorRefCollectionB::kRank == 2 && + TensorRefCollectionC::kRank == 2, "Tensors must be of rank 2"); + + // Blocking structure potentially improves performance of reference implementation + // with a minor increase in complexity. + // + // Note, this reference implementation is NOT expected to approach peak performance. 
+ using OutputTile = MatrixShape<4, 4>; + + dim3 block(16, 8); + dim3 grid( + (problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow), + (problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn), + batch_count + ); + + // Launch a GEMM kernel + kernel::BatchedGemm< + TensorRefCollectionA, + TensorRefCollectionB, + TensorRefCollectionC, + ScalarType, + AccumulatorType, + OutputTile, + InnerProductOp, + ConvertOp + ><<< grid, block >>>( + problem_size, + alpha, + tensor_a, + tensor_b, + beta, + tensor_c, + initial_accum + ); +} + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +// +// TensorRefCollection* is a type satisfying the TensorRefCollection concept. +// +template < + typename TensorRefCollectionA, + typename TensorRefCollectionB, + typename TensorRefCollectionC, + typename ScalarType, + typename AccumulatorType +> +void BatchedGemm( + gemm::GemmCoord problem_size, + int batch_count, + ScalarType alpha, + TensorRefCollectionA const& tensor_a, + TensorRefCollectionB const& tensor_b, + ScalarType beta, + TensorRefCollectionC &tensor_c) { + + BatchedGemm(problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, ScalarType(0)); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..04d308db6c74659db9675bedf07df64db0d3ad91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_complex.h @@ -0,0 +1,350 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued GEMM in device-side code. 
+*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace reference { +namespace device { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kMblock = 4, + int kNblock = 4 +> +__global__ void GemmComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock; + int 
col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock; + int batch_idx = blockIdx.z; + + tensor_a.add_pointer_offset(batch_idx * batch_stride_A); + tensor_b.add_pointer_offset(batch_idx * batch_stride_B); + tensor_c.add_pointer_offset(batch_idx * batch_stride_C); + tensor_d.add_pointer_offset(batch_idx * batch_stride_D); + + for (; batch_idx < batch_count; batch_idx += gridDim.z) { + + // Compute matrix product using blocks + ComputeType accum[kMblock][kNblock]; + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementB b = tensor_b.at(MatrixCoord(k_block, col)); + + ComputeType a_ik = ComputeType(a); + ComputeType b_kj = ComputeType(b); + + if (transform_a == ComplexTransform::kConjugate) { + a_ik = conj(a_ik); + } + + if (transform_b == ComplexTransform::kConjugate) { + b_kj = conj(b_kj); + } + + accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]); + } + } + } + } + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * ScalarType(tensor_c.at(coord))); + } + } + } + + tensor_a.add_pointer_offset(batch_stride_A * gridDim.z); + tensor_b.add_pointer_offset(batch_stride_B * gridDim.z); + tensor_c.add_pointer_offset(batch_stride_C * gridDim.z); + tensor_d.add_pointer_offset(batch_stride_D * gridDim.z); + + } // for (batch_idx) +} + +} // namespace kernel + 
+//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void GemmComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + int const kMblock = 4; + int const kNblock = 4; + + dim3 block(16, 8); + dim3 grid( + (problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock), + (problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock), + batch_count % std::numeric_limits::max() + ); + + if (grid.y <= std::numeric_limits::max()) { + kernel::GemmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ScalarType, + ComputeType, + ElementD, + ConvertOp, + InnerProductOp, + kMblock, + kNblock + ><<< grid, block >>>( + problem_size, + alpha, + tensor_a, + transform_a, + tensor_b, + transform_b, + beta, + tensor_c, + tensor_d, + 
initial_accum, + batch_count, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D + ); + } else { + // Using bigger thread tile size + int const kBigMblock = 4; + int const kBigNblock = 16; + + dim3 Bigblock(16, 8); + dim3 Biggrid( + (problem_size.m() + block.x * kBigMblock - 1) / (block.x * kBigMblock), + (problem_size.n() + block.y * kBigNblock - 1) / (block.y * kBigNblock), + batch_count % std::numeric_limits::max() + ); + + kernel::GemmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ScalarType, + ComputeType, + ElementD, + ConvertOp, + InnerProductOp, + kBigMblock, + kBigNblock + ><<< Biggrid, Bigblock >>>( + problem_size, + alpha, + tensor_a, + transform_a, + tensor_b, + transform_b, + beta, + tensor_c, + tensor_d, + initial_accum, + batch_count, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D + ); + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ElementD = ElementC +> +void GemmComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d) { + + GemmComplex(problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0)); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..baab696061374d6e2325398b67b1f2df913ed724 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h @@ -0,0 +1,311 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued GEMM in device code. 
+*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/complex.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/numeric_types.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_ref_planar_complex.h" + +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace reference { +namespace device { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +static int const kGemmPlanarComplexBlockSize = 4; + +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add> +> +__global__ void GemmPlanarComplex( + gemm::GemmCoord problem_size, + complex alpha, + TensorRefPlanarComplex tensor_a, + ComplexTransform transform_a, + TensorRefPlanarComplex tensor_b, + ComplexTransform transform_b, + complex beta, + TensorRefPlanarComplex tensor_c, + TensorRefPlanarComplex tensor_d, + complex initial_accum) { + + int const kMblock = kGemmPlanarComplexBlockSize; + int const kNblock = kGemmPlanarComplexBlockSize; + + using ComplexA = typename TensorRefPlanarComplex::ComplexElement; + using ComplexB = typename TensorRefPlanarComplex::ComplexElement; + using ComplexC = typename TensorRefPlanarComplex::ComplexElement; + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + complex accum[kMblock][kNblock]; + + int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock; + int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock; + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + accum[i][j] = initial_accum; + } + } + + CUTLASS_PRAGMA_NO_UNROLL + for (int k_block = 0; k_block < K; ++k_block) { + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + + ComplexA a_ik = tensor_a.at(MatrixCoord(row, k_block)); + ComplexB b_kj = tensor_b.at(MatrixCoord(k_block, col)); + + complex a = complex{ + ComputeType(a_ik.real()), + ComputeType(a_ik.imag()) + }; + + complex b = complex{ + ComputeType(b_kj.real()), + ComputeType(b_kj.imag()) + }; + + if (transform_a == ComplexTransform::kConjugate) { + a = conj(a); + } + + if (transform_b == ComplexTransform::kConjugate) { + b = conj(b); + } + + accum[i][j] = inner_product_op(a, b, accum[i][j]); + } + } + } + } + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + + complex acc{ + ScalarType(accum[i][j].real()), + ScalarType(accum[i][j].imag()) + }; + + ComplexC c_ij = ComplexC(); + + if (beta.real() != ScalarType() || beta.imag() != ScalarType()) { + c_ij = tensor_c.at(coord); + } + + complex src{ + ScalarType(c_ij.real()), + ScalarType(c_ij.imag()) + }; + + complex result = alpha * acc + beta * src; + + ComplexC d_ij; + + d_ij.real() = convert_op(result.real()); + d_ij.imag() = 
convert_op(result.imag()); + + tensor_d.at(coord) = d_ij; + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add> +> +void GemmPlanarComplex( + gemm::GemmCoord problem_size, + complex alpha, + TensorRefPlanarComplex tensor_a, + ComplexTransform transform_a, + TensorRefPlanarComplex tensor_b, + ComplexTransform transform_b, + complex beta, + TensorRefPlanarComplex tensor_c, + TensorRefPlanarComplex tensor_d, + complex initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + int const kMblock = kernel::kGemmPlanarComplexBlockSize; + int const kNblock = kernel::kGemmPlanarComplexBlockSize; + + dim3 block(16, 8); + + dim3 grid( + (problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock), + (problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock), + 1); + + kernel::GemmPlanarComplex< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ScalarType, + ComputeType, + ConvertOp, + InnerProductOp + ><<< grid, block >>>( + problem_size, + alpha, + tensor_a, + transform_a, + tensor_b, + transform_b, + beta, + 
tensor_c, + tensor_d, + initial_accum + ); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType +> +void GemmPlanarComplex( + gemm::GemmCoord problem_size, + complex alpha, + TensorRefPlanarComplex tensor_a, + ComplexTransform transform_a, + TensorRefPlanarComplex tensor_b, + ComplexTransform transform_b, + complex beta, + TensorRefPlanarComplex tensor_c, + TensorRefPlanarComplex tensor_d) { + + GemmPlanarComplex( + problem_size, + alpha, + tensor_a, transform_a, + tensor_b, transform_b, + beta, + tensor_c, + tensor_d, + complex()); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gett.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gett.hpp new file mode 100644 index 0000000000000000000000000000000000000000..84b7037ee2ed2db22b5bda1d45146fc43b49e22d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gett.hpp @@ -0,0 +1,146 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief GETT device reference code +*/ +#pragma once + +#include + +namespace cutlass::reference::device { + +template < + class ATensor, + class BTensor, + class CTensor, + class DTensor, + class ElementAccumulator, + class ElementEpilogue> +__global__ static +void +gett_kernel( + DTensor D, + ATensor const A, + BTensor const B, + CTensor const C, + ElementEpilogue alpha, ElementEpilogue beta, + ElementAccumulator acc_init) +{ + using namespace cute; + + static_assert(DTensor::rank == 3, "(M,N,L)"); + static_assert(ATensor::rank == 3, "(M,K,L)"); + static_assert(BTensor::rank == 3, "(N,K,L)"); + static_assert(CTensor::rank == 3, "(M,N,L)"); + + assert(size<0>(A) == size<0>(D)); // M + assert(size<0>(C) == size<0>(D)); // M + assert(size<0>(B) == size<1>(D)); // N + assert(size<1>(C) == size<1>(D)); // N + assert(size<1>(A) == size<1>(B)); // K + assert(size<2>(A) == size<2>(D)); // L + assert(size<2>(B) == size<2>(D)); // L + assert(size<2>(C) == size<2>(D)); // L + + NumericConverter a_converter; + NumericConverter b_converter; + NumericConverter acc_converter; + NumericConverter source_converter; + NumericConverter output_converter; + + // Thread id to each element of D + for (int tid = threadIdx.x + blockDim.x * blockIdx.x; + tid < size(D); + tid += blockDim.x * gridDim.x) { + // (m,n,l) coordinate + auto mnl_coord = idx2crd(tid, product_each(shape(D))); + auto m = get<0>(mnl_coord); + auto n = get<1>(mnl_coord); + auto l = get<2>(mnl_coord); + + auto A_ml = A(m,_,l); + auto B_nl = B(n,_,l); + + ElementAccumulator accum = ElementAccumulator(0); + for (int k = 0; k < size<1>(A); ++k) { + ElementAccumulator a = a_converter(A_ml(k)); + ElementAccumulator b = b_converter(B_nl(k)); + accum += a * b; + } + + ElementEpilogue scaled_output = (alpha * acc_converter(accum)) + (beta * source_converter(C(m,n,l))); + D(m,n,l) = output_converter(scaled_output); + } +} + +// Most general version +template < + class ProblemShapeMNKL, + class ElementA, + class StrideA, + 
class ElementB, + class StrideB, + class ElementAccumulator, + class ElementC, + class StrideC, + class ElementD, + class StrideD, + class ElementEpilogue> +void +gett( + ProblemShapeMNKL problem_shape_mnkl, + ElementA const* ptr_A, StrideA stride_a_mkl, + ElementB const* ptr_B, StrideB stride_b_nkl, + ElementAccumulator _, + ElementC const* ptr_C, StrideC stride_c_mnl, + ElementD * ptr_D, StrideD stride_d_mnl, + ElementEpilogue alpha, ElementEpilogue beta, + cudaStream_t stream = 0) { + using namespace cute; + + static_assert(rank(ProblemShapeMNKL{}) == 4); + auto M = get<0>(problem_shape_mnkl); + auto N = get<1>(problem_shape_mnkl); + auto K = get<2>(problem_shape_mnkl); + auto L = get<3>(problem_shape_mnkl); + + // Represent the full tensors + auto A = make_tensor(make_gmem_ptr(ptr_A), make_shape(M,K,L), stride_a_mkl); // (M,K,L) + auto B = make_tensor(make_gmem_ptr(ptr_B), make_shape(N,K,L), stride_b_nkl); // (N,K,L) + auto C = make_tensor(make_gmem_ptr(ptr_C), make_shape(M,N,L), stride_c_mnl); // (M,N,L) + auto D = make_tensor(make_gmem_ptr(ptr_D), make_shape(M,N,L), stride_d_mnl); // (M,N,L) + + dim3 dimBlock(256); + dim3 dimGrid(240); + gett_kernel<<< dimGrid, dimBlock, 0, stream >>>(D, A, B, C, alpha, beta, ElementAccumulator(0)); +} + +} // namespace cutlass::reference::device diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/gemm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..e917765bea9e28eeb67fdc316794f868c568970d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/gemm.h @@ -0,0 +1,162 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GEMM in host-side code. 
+*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/util/reference/device/thread/gemm.h" + +namespace cutlass { +namespace reference { +namespace device { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +template < + typename TensorRefA, + typename TensorRefB, + typename TensorRefC, + typename ScalarType, + typename AccumulatorType, + typename OutputTile, + typename InnerProductOp, + typename ConvertOp +> +__global__ void Gemm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRefA tensor_a, + TensorRefB tensor_b, + ScalarType beta, + TensorRefC tensor_c, + TensorRefC tensor_d, + AccumulatorType initial_accum) { + + // Map each thread to a unique tile of the output matrix + MatrixCoord output_coord( + MatrixCoord::Index((threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kRow), + MatrixCoord::Index((threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kColumn) + ); + + // Compute the general matrix product + thread::Gemm< + TensorRefA, + TensorRefB, + TensorRefC, + ScalarType, + AccumulatorType, + OutputTile, + InnerProductOp, + ConvertOp + > gemm(initial_accum); + + gemm.multiply_add( + problem_size, + tensor_a, + tensor_b, + output_coord); + + gemm.epilogue(problem_size, alpha, beta, tensor_c, tensor_d, output_coord); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. 
+template < + typename TensorRefCollectionA, + typename TensorRefCollectionB, + typename TensorRefCollectionC, + typename ScalarType, + typename AccumulatorType, + typename OutputTile, + typename InnerProductOp, + typename ConvertOp +> +__global__ void BatchedGemm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRefCollectionA tensor_collection_a, + TensorRefCollectionB tensor_collection_b, + ScalarType beta, + TensorRefCollectionC tensor_collection_c, + AccumulatorType initial_accum) { + + // Obtain batch ID + int batch_id = blockIdx.z; + + // Dereference based on batch_id + typename TensorRefCollectionA::TensorRef tensor_a = tensor_collection_a.at(batch_id); + typename TensorRefCollectionB::TensorRef tensor_b = tensor_collection_b.at(batch_id); + typename TensorRefCollectionC::TensorRef tensor_c = tensor_collection_c.at(batch_id); + + // Map each thread to a unique tile of the output matrix + MatrixCoord output_coord( + (threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kColumn, + (threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kRow + ); + + // Compute the general matrix product + thread::Gemm< + typename TensorRefCollectionA::TensorRef, + typename TensorRefCollectionB::TensorRef, + typename TensorRefCollectionC::TensorRef, + ScalarType, + AccumulatorType, + OutputTile, + InnerProductOp, + ConvertOp + > gemm(initial_accum); + + gemm.multiply_add( + problem_size, + tensor_a, + tensor_b, + output_coord); + + gemm.epilogue(problem_size, alpha, beta, tensor_c, output_coord); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h new file mode 100644 index 0000000000000000000000000000000000000000..4850b98a8f4e9d0d717fedc9c76a99b19bf158cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h @@ -0,0 +1,168 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include + +#include "cutlass/cutlass.h" + +namespace cutlass { +namespace reference { +namespace device { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel to initialize tensor to uniform random distribution +template +__global__ void TensorInitializeUniform( + Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { + __shared__ curandState_t rng_state[1024]; + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; + + curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); + + int c_idx = blockIdx.x * blockDim.x + threadIdx.x; + int s_idx = blockIdx.y * blockDim.x; + + tensor += s_idx * ldm + c_idx; + + for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { + if (s_idx < dim_strided && c_idx < dim_contiguous) { + double range = dist.uniform.max - dist.uniform.min; + + double rnd = curand_uniform(&rng_state[threadIdx.x]); + + rnd = dist.uniform.min + range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + if (dist.int_scale >= 0) { + rnd = double(int(rnd * double(1 << dist.int_scale))); + *tensor = T(rnd / double(1 << dist.int_scale)); + } else { + *tensor = T(rnd); + } + + 
tensor += ldm; + } + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel to initialize tensor to uniform distribution +template +__global__ void TensorInitializeGaussian( + Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { + __shared__ curandState_t rng_state[1024]; + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; + + curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); + + int c_idx = blockIdx.x * blockDim.x + threadIdx.x; + int s_idx = blockIdx.y * blockDim.x; + + tensor += s_idx * ldm + c_idx; + + for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { + if (s_idx < dim_strided && c_idx < dim_contiguous) { + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + + double rnd = curand_normal(&rng_state[threadIdx.x]); + + rnd = dist.gaussian.mean + dist.gaussian.stddev * rnd; + + if (dist.int_scale >= 0) { + rnd = double(int(rnd * double(1 << dist.int_scale))); + *tensor = T(rnd / double(1 << dist.int_scale)); + } else { + *tensor = T(rnd); + } + } + } +} + +/// Kernel to initialize tensor to an identity matrix +template +__global__ void TensorInitializeLinear( + Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { + __shared__ curandState_t rng_state[1024]; + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; + + curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); + + int c_idx = blockIdx.x * blockDim.x + threadIdx.x; + int s_idx = blockIdx.y * blockDim.x; + + tensor += s_idx * ldm + c_idx; + + for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { + if (s_idx < dim_strided && c_idx < dim_contiguous) { + *tensor = + dist.linear.offset + dist.linear.delta_row * c_idx + dist.linear.delta_column * s_idx; + } + } +} + +/// Kernel to initialize tensor to 
an identity matrix +template +__global__ void TensorInitializeIdentity( + Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { + __shared__ curandState_t rng_state[1024]; + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; + + curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); + + int c_idx = blockIdx.x * blockDim.x + threadIdx.x; + int s_idx = blockIdx.y * blockDim.x; + + tensor += s_idx * ldm + c_idx; + + for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { + if (s_idx < dim_strided && c_idx < dim_contiguous) { + *tensor = (c_idx == s_idx ? T(1) : T(0)); + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h new file mode 100644 index 0000000000000000000000000000000000000000..d294258bc0baba31af76b962002e59996eab838e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h @@ -0,0 +1,159 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/coord.h" +#include "cutlass/subbyte_reference.h" +#include "cutlass/fast_math.h" + +namespace cutlass { +namespace reference { +namespace device { +namespace kernel { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Defines several helpers +namespace detail { + +/// Helper to perform for-each operation +template +struct TensorForEachHelper { + + /// Constructor for general rank + __inline__ __device__ + TensorForEachHelper(Func &func, Coord const &size, Coord &coord, int64_t index) { + + int64_t product = 1; + + CUTLASS_PRAGMA_UNROLL + for (int i = Rank - RankRemaining; i < Rank; ++i) { + product *= size[i]; + } + + coord[Rank - 1 - RankRemaining] = index / product; + int64_t remaining = index % product; + + TensorForEachHelper(func, size, coord, remaining); + } +}; + +/// Helper to perform for-each operation +template +struct TensorForEachHelper { + + /// Constructor for fastest changing rank + __inline__ __device__ + TensorForEachHelper(Func &func, Coord const &size, Coord &coord, int64_t index) { + + coord[Rank - 1] = index; + + if (coord < size) { + func(coord); + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel calls a functor for each element in a tensor's index space +template +__global__ void TensorForEach(Coord size, Params params = Params()) { + + Func func(params); + + int64_t index = threadIdx.x + blockIdx.x * blockDim.x; + int64_t max_index = 1; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Rank; ++i) { + max_index *= size[i]; + } + + CUTLASS_PRAGMA_NO_UNROLL + while (index < max_index) { + Coord coord; + + detail::TensorForEachHelper(func, size, coord, index); + index += blockDim.x * gridDim.x; + } +} + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel calls a functor for each element along a tensor's diagonal +template +__global__ void TensorDiagonalForEach(Coord size, Params params, int start, int end) { + + Func func(params); + + int64_t index = threadIdx.x + blockIdx.x * blockDim.x + start; + + if (index < end) { + Coord coord; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Rank; ++i) { + coord[i] = index; + } + + func(coord); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void BlockForEach( + Element *ptr, + size_t capacity, + typename Func::Params params) { + + Func func(params); + + size_t index = threadIdx.x + blockIdx.x * blockDim.x; + + for (; index < capacity; index += blockDim.x * gridDim.x) { + ReferenceFactory::get(ptr, index) = func(); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace device +} // namespace reference +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/rank_2k_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/rank_2k_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..357ca3cfd558c0dbd463db66bc6ba60e0cd18b52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/rank_2k_complex.h @@ -0,0 +1,355 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued GEMM in device-side code. 
+*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace reference { +namespace device { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add, + int kMblock = 4, + int kNblock = 4 +> +__global__ void Rank2KComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + FillMode fill_mode_c, + BlasMode blas_mode, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + assert(M=N); + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock; + int col_block = (blockIdx.y * blockDim.y + 
threadIdx.y) * kNblock; + int batch_idx = blockIdx.z; + + tensor_a.add_pointer_offset(batch_idx * batch_stride_A); + tensor_b.add_pointer_offset(batch_idx * batch_stride_B); + tensor_c.add_pointer_offset(batch_idx * batch_stride_C); + tensor_d.add_pointer_offset(batch_idx * batch_stride_D); + + for (; batch_idx < batch_count; batch_idx += gridDim.z) { + + // Compute matrix product using blocks + ComputeType accum[kMblock][kNblock]; + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N && + ( (fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col) ) + ) { + + // A x B^T (Symmetric) or A x B^H (Hermitian) + // complex conjugation on operandB (b_t) is function of blas3 computation + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementB b_t = (blas_mode == BlasMode::kHermitian) ? + conj(tensor_b.at(MatrixCoord(col, k_block))) : + tensor_b.at(MatrixCoord(col, k_block)); + + ComputeType a_ik = ComputeType(a); + ComputeType b_jk = ComputeType(b_t); + + // complex conjugation is a function of operand layouts + if (transform_a == ComplexTransform::kConjugate) { + a_ik = conj(a_ik); + } + // complex conjugation is a function of operand layouts + if (transform_b == ComplexTransform::kConjugate) { + b_jk = conj(b_jk); + } + + accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]); + + // B x A^T (Symmetric) or B x A^H (Hermitian) + // complex conjugation on operandB (a_t) is function of blas3 computation + ElementB b = tensor_b.at(MatrixCoord(row, k_block)); + ElementA a_t = (blas_mode == BlasMode::kHermitian) ? 
+ conj(tensor_a.at(MatrixCoord(col, k_block))): + tensor_a.at(MatrixCoord(col, k_block)); + + ComputeType b_ik = ComputeType(b); + ComputeType a_jk = ComputeType(a_t); + + // complex conjugation here is a function of operand layouts + if (transform_b == ComplexTransform::kConjugate) { + b_ik = conj(b_ik); + } + // complex conjugation here is a function of operand layouts + if (transform_a == ComplexTransform::kConjugate) { + a_jk = conj(a_jk); + } + + accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]); + } + } + } + } + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kNblock; j++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kMblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N && + ((fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col)) + ) { + + ScalarType c = tensor_c.at(coord); + // The imaginary parts of the diagonal elements of + // a complex data type are assumed and set to zero + if (blas_mode == BlasMode::kHermitian) { + c = (row == col) ? real(c) : c; + } + + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * c); + } + } + } + + tensor_a.add_pointer_offset(batch_stride_A * gridDim.z); + tensor_b.add_pointer_offset(batch_stride_B * gridDim.z); + tensor_c.add_pointer_offset(batch_stride_C * gridDim.z); + tensor_d.add_pointer_offset(batch_stride_D * gridDim.z); + + } // for (batch_idx) +} + +} // namespace kernel + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. 
Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Rank2KComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + FillMode fill_mode_c, + BlasMode blas_mode, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + int const kMblock = 4; + int const kNblock = 4; + + dim3 block(16, 8); + dim3 grid( + (problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock), + (problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock), + batch_count % std::numeric_limits::max() + ); + + kernel::Rank2KComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ScalarType, + ComputeType, + ConvertOp, + InnerProductOp, + kMblock, + kNblock + ><<< grid, block >>>( + problem_size, + alpha, + tensor_a, + transform_a, + tensor_b, + transform_b, + beta, + tensor_c, + tensor_d, + initial_accum, + fill_mode_c, + blas_mode, + batch_count, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D + ); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType +> +void Rank2KComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + FillMode fill_mode_c, + BlasMode blas_mode) { + + Rank2KComplex( + problem_size, alpha, + tensor_a, transform_a, + tensor_b, transform_b, + beta, tensor_c, tensor_d, + ScalarType(0), + fill_mode_c, + blas_mode); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h new file mode 100644 index 0000000000000000000000000000000000000000..e29ad69f848b0b84f270da6c703fb5ea32667847 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h @@ -0,0 +1,246 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines host-side elementwise operations on TensorView. 
+*/ + +#pragma once +// Standard Library includes +#include + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/relatively_equal.h" + +#include "cutlass/util/distribution.h" + +#include "tensor_foreach.h" + +namespace cutlass { +namespace reference { +namespace device { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +template +__global__ void BlockCompareEqual( + int *equal, + Element const *ptr_A, + Element const *ptr_B, + size_t capacity) { + + size_t idx = threadIdx.x + blockDim.x * blockIdx.x; + + for (; idx < capacity; idx += gridDim.x * blockDim.x) { + + Element a = cutlass::ReferenceFactory::get(ptr_A, idx); + Element b = cutlass::ReferenceFactory::get(ptr_B, idx); + + if (a != b) { + *equal = 0; + + return; + } + } +} + +template +__global__ void BlockCompareRelativelyEqual( + int *equal, + Element const *ptr_A, + Element const *ptr_B, + size_t capacity, + Element epsilon, + Element nonzero_floor) { + + size_t idx = threadIdx.x + blockDim.x * blockIdx.x; + + for (; idx < capacity; idx += gridDim.x * blockDim.x) { + + Element a = cutlass::ReferenceFactory::get(ptr_A, idx); + Element b = cutlass::ReferenceFactory::get(ptr_B, idx); + + if (!relatively_equal(a, b, epsilon, nonzero_floor)) { + *equal = 0; + return; + } + } +} + +} // namespace kernel + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Performs a bit-level equality check between two blocks +template +bool BlockCompareEqual( + Element const *ptr_A, + Element const *ptr_B, + size_t capacity, + int grid_size = 0, + int block_size = 0) { + + int equal_flag = 1; + int *device_equal_flag = nullptr; + + if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) { + throw std::runtime_error("Failed to allocate device flag."); + } + + if (cudaMemcpy( + device_equal_flag, + &equal_flag, + sizeof(int), + cudaMemcpyHostToDevice) != 
cudaSuccess) { + + throw std::runtime_error("Failed to copy equality flag to device."); + } + + if (!grid_size || !block_size) { + + // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API + cudaError_t result = cudaOccupancyMaxPotentialBlockSize( + &grid_size, + &block_size, + reinterpret_cast(kernel::BlockCompareEqual)); + + if (result != cudaSuccess) { + throw std::runtime_error("Failed to query occupancy."); + } + + // Limit block size. This has the effect of increasing the number of items processed by a + // single thread and reduces the impact of initialization overhead. + block_size = (block_size < 128 ? block_size : 128); + } + + dim3 grid(grid_size, 1, 1); + dim3 block(block_size, 1, 1); + + kernel::BlockCompareEqual<<< grid, block >>>(device_equal_flag, ptr_A, ptr_B, capacity); + + if (cudaMemcpy( + &equal_flag, + device_equal_flag, + sizeof(int), + cudaMemcpyDeviceToHost) != cudaSuccess) { + + cudaFree(device_equal_flag); + + throw std::runtime_error("Failed to copy equality flag from device."); + } + + cudaFree(device_equal_flag); + + return equal_flag; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Performs a bit-level equality check between two blocks +template +bool BlockCompareRelativelyEqual( + Element const *ptr_A, + Element const *ptr_B, + size_t capacity, + Element epsilon, + Element nonzero_floor, + int grid_size = 0, + int block_size = 0) { + + int equal_flag = 1; + int *device_equal_flag = nullptr; + + if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) { + throw std::runtime_error("Failed to allocate device flag."); + } + + if (cudaMemcpy( + device_equal_flag, + &equal_flag, + sizeof(int), + cudaMemcpyHostToDevice) != cudaSuccess) { + + throw std::runtime_error("Failed to copy equality flag to device."); + } + + if (!grid_size || !block_size) { + + // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy 
API + cudaError_t result = cudaOccupancyMaxPotentialBlockSize( + &grid_size, + &block_size, + reinterpret_cast(kernel::BlockCompareRelativelyEqual)); + + if (result != cudaSuccess) { + throw std::runtime_error("Failed to query occupancy."); + } + + // Limit block size. This has the effect of increasing the number of items processed by a + // single thread and reduces the impact of initialization overhead. + block_size = (block_size < 128 ? block_size : 128); + } + + dim3 grid(grid_size, 1, 1); + dim3 block(block_size, 1, 1); + + kernel::BlockCompareRelativelyEqual<<< grid, block >>>( + device_equal_flag, + ptr_A, + ptr_B, + capacity, + epsilon, + nonzero_floor + ); + + if (cudaMemcpy( + &equal_flag, + device_equal_flag, + sizeof(int), + cudaMemcpyDeviceToHost) != cudaSuccess) { + + cudaFree(device_equal_flag); + + throw std::runtime_error("Failed to copy equality flag from device."); + } + + cudaFree(device_equal_flag); + + return equal_flag; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // device +} // reference +} // cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_fill.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_fill.h new file mode 100644 index 0000000000000000000000000000000000000000..b066de321f54c576838607e81683ea9ce697b4ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_fill.h @@ -0,0 +1,2000 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines device-side elementwise operations on TensorView. Note, the operations defined + in this header are not specialized for any particular data layout and are therefore not + intended to offer the best possible performance. 
Rather, they are intended to be generic + reference implementations to support the CUTLASS unit tests. +*/ + +#pragma once + +#if !defined(__CUDACC_RTC__) + +// Standard Library includes +#include +#include +#include +#include +#include + +#endif + +// CUDA includes +#include + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/complex.h" +#include "cutlass/tensor_view.h" +#include "cutlass/blas3.h" + +#include "cutlass/layout/vector.h" + +#include "cutlass/util/reference/device/tensor_foreach.h" +#include "cutlass/util/distribution.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace device { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +CUTLASS_DEVICE +FloatType random_normal_float(curandState_t *state) { + return curand_normal(state); +} + +template <> +CUTLASS_DEVICE +double random_normal_float(curandState_t *state) { + return curand_normal_double(state); +} + +template +CUTLASS_DEVICE +FloatType random_uniform_float(curandState_t *state) { + return curand_uniform(state); +} + +template <> +CUTLASS_DEVICE +double random_uniform_float(curandState_t *state) { + return curand_uniform_double(state); +} + +template +struct RandomGaussianFunc { + + using FloatType = typename std::conditional<(sizeof(Element) > 4), double, float>::type; + using IntType = typename std::conditional<(sizeof(Element) > 4), int64_t, int>::type; + + /// Parameters structure + struct Params { + + // + // Data members + // + + uint64_t seed; + FloatType mean; + FloatType stddev; + int int_scale; + FloatType float_scale_up; + FloatType float_scale_down; + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + uint64_t seed_ = 0, + Element mean_ = 0, + Element stddev_ = 1, + int int_scale_ = -1 + ): + seed(seed_), + mean(static_cast(mean_)), + stddev(static_cast(stddev_)), + int_scale(int_scale_) { + + float_scale_up = FloatType(IntType(1) << int_scale); + float_scale_up += FloatType(0.5) * float_scale_up; + float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale); + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + /// RNG state object + curandState_t rng_state; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + RandomGaussianFunc(Params const ¶ms): params(params) { + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x; + + curand_init(params.seed, gtid, 0, &rng_state); + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + Element operator()() { + + FloatType rnd = random_normal_float(&rng_state); + rnd = params.mean + params.stddev * rnd; + + Element result; + if (params.int_scale >= 0) { + rnd = FloatType(IntType(rnd * params.float_scale_up)); + result = Element(rnd * params.float_scale_down); + } + else { + result = Element(rnd); + } + + return result; + } +}; + + +template +struct RandomGaussianFunc> { + + using Element = complex; + using FloatType = typename std::conditional<(sizeof(Real) > 4), double, float>::type; + using IntType = typename std::conditional<(sizeof(Real) > 4), int64_t, int>::type; + + /// Parameters structure + struct Params { + + // + // Data members + // + + uint64_t seed; + FloatType mean; + FloatType stddev; + int int_scale; + FloatType float_scale_up; + FloatType float_scale_down; + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + uint64_t seed_ = 0, + Real mean_ = 0, + Real stddev_ = 1, + int int_scale_ = -1 + ): + seed(seed_), + mean(static_cast(mean_)), + stddev(static_cast(stddev_)), + int_scale(int_scale_) { + + float_scale_up = FloatType(IntType(1) << int_scale); + float_scale_up += FloatType(0.5) * float_scale_up; + float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale); + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + /// RNG state object + curandState_t rng_state; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + RandomGaussianFunc(Params const ¶ms): params(params) { + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x; + + curand_init(params.seed, gtid, 0, &rng_state); + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + Element operator()() { + + FloatType rnd_r = random_normal_float(&rng_state); + FloatType rnd_i = random_normal_float(&rng_state); + rnd_r = params.mean + params.stddev * rnd_r; + rnd_i = params.mean + params.stddev * rnd_i; + + Element result; + if (params.int_scale >= 0) { + rnd_r = FloatType(IntType(rnd_r * params.float_scale_up)); + rnd_i = FloatType(IntType(rnd_i * params.float_scale_down)); + + result = { + Real(rnd_r * params.float_scale_down), + Real(rnd_i * params.float_scale_down) + }; + } + else { + result = Element(Real(rnd_r), Real(rnd_i)); + } + + return result; + } +}; + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillRandomGaussianFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + using RandomFunc = RandomGaussianFunc; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + typename RandomFunc::Params random; + + 
// + // Methods + // + + /// Construction of Gaussian RNG functor. + Params( + TensorView view_ = TensorView(), + typename RandomFunc::Params random_ = typename RandomFunc::Params() + ): + view(view_), random(random_) { + + } + }; + + // + // Data members + // + + Params params; + RandomFunc random; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorFillRandomGaussianFunc(Params const ¶ms): params(params), random(params.random) { + + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + params.view.at(coord) = random(); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a Gaussian distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomGaussian( + TensorView view, ///< destination tensor + uint64_t seed, ///< seed for RNG + typename RealType::Type mean = Element(0), ///< Gaussian distribution's mean + typename RealType::Type stddev = Element(1), ///< Gaussian distribution's standard deviation + int bits = -1, ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + cudaStream_t stream = nullptr) { + + using RandomFunc = detail::RandomGaussianFunc; + using Func = detail::TensorFillRandomGaussianFunc; + using Params = typename Func::Params; + + TensorForEach( + view.extent(), + Params(view, typename RandomFunc::Params(seed, mean, stddev, bits)), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a Gaussian distribution. 
+template ///< Element type +void BlockFillRandomGaussian( + Element *ptr, + size_t capacity, + uint64_t seed, ///< seed for RNG + typename RealType::Type mean, ///< Gaussian distribution's mean + typename RealType::Type stddev, ///< Gaussian distribution's standard deviation + int bits = -1, ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + cudaStream_t stream = nullptr) { + + using RandomFunc = detail::RandomGaussianFunc; + + typename RandomFunc::Params params(seed, mean, stddev, bits); + + BlockForEach(ptr, capacity, params, /*grid_size*/0, /*block_size*/0, stream); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Computes a random Gaussian distribution +template ///< Element type +struct RandomUniformFunc { + + using FloatType = typename std::conditional< + (sizeof(Element) > 4), + double, + float>::type; + + using IntType = typename std::conditional< + (sizeof(Element) > 4), + int64_t, + int>::type; + + /// Parameters structure + struct Params { + + // + // Data members + // + + uint64_t seed; + FloatType range; + FloatType max; + int int_scale; + FloatType float_scale_up; + FloatType float_scale_down; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + uint64_t seed_ = 0, + Element max_ = 1, + Element min = 0, + int int_scale_ = -1 + ): + seed(seed_), + range(static_cast(max_ - min)), + max(static_cast(max_)), + int_scale(int_scale_) { + + float_scale_up = FloatType(IntType(1) << int_scale); + float_scale_up += FloatType(0.5) * float_scale_up; + float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale); + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + /// RNG state object + curandState_t rng_state; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + RandomUniformFunc(Params const ¶ms): params(params) { + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x; + + curand_init(params.seed, gtid, 0, &rng_state); + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + Element operator()() { + + FloatType rnd = random_uniform_float(&rng_state); + rnd = params.max - params.range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + Element result; + + if (params.int_scale >= 0) { + rnd = FloatType(IntType(rnd * params.float_scale_up)); + result = Element(rnd * params.float_scale_down); + } + else { + result = Element(rnd); + } + + return result; + } +}; + +/// Computes a random Gaussian distribution +template +struct RandomUniformFunc> { + + using Element = complex; + + using FloatType = typename std::conditional< + (sizeof(Real) > 4), + double, + float>::type; + + using IntType = typename std::conditional< + (sizeof(Real) > 4), + int64_t, + int>::type; + + /// Parameters structure + struct Params { + + // + // Data members + // + + uint64_t seed; + FloatType range; + FloatType min; + int int_scale; + FloatType float_scale_up; + FloatType float_scale_down; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + uint64_t seed_ = 0, + FloatType max = 1, + FloatType min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), + range(static_cast(max - min_)), + min(static_cast(min_)), + int_scale(int_scale_) { + + float_scale_up = FloatType(IntType(1) << int_scale); + float_scale_up += FloatType(0.5) * float_scale_up; + float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale); + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + /// RNG state object + curandState_t rng_state; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + RandomUniformFunc(Params const ¶ms): params(params) { + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x; + + curand_init(params.seed, gtid, 0, &rng_state); + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + Element operator()() { + + FloatType rnd_r = random_uniform_float(&rng_state); + FloatType rnd_i = random_uniform_float(&rng_state); + + rnd_r = params.min + params.range * rnd_r; + rnd_i = params.min + params.range * rnd_i; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + Element result; + + if (params.int_scale >= 0) { + rnd_r = FloatType(IntType(rnd_r * params.float_scale_up)); + rnd_i = FloatType(IntType(rnd_i * params.float_scale_up)); + + result = { + Real(rnd_r * params.float_scale_down), + Real(rnd_i * params.float_scale_down) + }; + } + else { + result = Element(Real(rnd_r), Real(rnd_i)); + } + + return result; + } +}; + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillRandomUniformFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + using RandomFunc = RandomUniformFunc; + + /// Parameters structure + 
struct Params { + + // + // Data members + // + + TensorView view; + typename RandomFunc::Params random; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + Params( + TensorView view_ = TensorView(), + typename RandomFunc::Params random_ = RandomFunc::Params() + ): + view(view_), random(random_) { + + } + }; + + // + // Data members + // + + Params params; + RandomFunc random; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorFillRandomUniformFunc(Params const ¶ms): params(params), random(params.random) { + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + params.view.at(coord) = random(); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomUniform( + TensorView view, ///< destination tensor + uint64_t seed, ///< seed for RNG + typename RealType::Type max = Element(1), ///< upper bound of distribution + typename RealType::Type min = Element(0), ///< lower bound for distribution + int bits = -1, ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. 
+ cudaStream_t stream = nullptr) { + + using RandomFunc = detail::RandomUniformFunc; + using Func = detail::TensorFillRandomUniformFunc; + using Params = typename Func::Params; + + typename RandomFunc::Params random(seed, max, min, bits); + + TensorForEach( + view.extent(), + Params(view, random), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. +template +void BlockFillRandomUniform( + Element *ptr, + size_t capacity, + uint64_t seed, ///< seed for RNG + typename RealType::Type max, ///< upper bound of distribution + typename RealType::Type min, ///< lower bound for distribution + int bits = -1, ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + cudaStream_t stream = nullptr) { + + using RandomFunc = detail::RandomUniformFunc; + + typename RandomFunc::Params params(seed, max, min, bits); + + BlockForEach(ptr, capacity, params, /*grid_size*/0, /*block_size*/0, stream); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Computes a random sparse meta +template ///< Element type +struct RandomSparseMetaFunc { + + using FloatType = float; + + using IntType = int32_t; + + /// Parameters structure + struct Params { + + // + // Data members + // + + uint64_t seed; + FloatType range; + int MetaSizeInBits; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + uint64_t seed_ = 0, + int MetaSizeInBits_ = 2 + ): + seed(seed_), + MetaSizeInBits(MetaSizeInBits_) { + if (MetaSizeInBits_ == 2) { + range = 6; + } else if (MetaSizeInBits_ == 4) { + range = 2; + } + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + /// RNG state object + curandState_t rng_state; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + RandomSparseMetaFunc(Params const ¶ms): params(params) { + + uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x; + + curand_init(params.seed, gtid, 0, &rng_state); + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + Element operator()() { + Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe}; + Element TwoToOneMeta[2] = {0x4, 0xe}; + + Element *MetaArray = + (params.MetaSizeInBits == 2) ? FourToTwoMeta : TwoToOneMeta; + + Element result = 0x0; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < cutlass::sizeof_bits::value / 4; ++i) { + FloatType rnd = random_uniform_float(&rng_state); + rnd = params.range * rnd; + Element meta = MetaArray[(int)rnd]; + + result = (Element)(result | ((Element)(meta << (i * 4)))); + } + + return result; + } +}; + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillRandomSparseMetaFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + using RandomFunc = RandomSparseMetaFunc; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + typename RandomFunc::Params random; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + TensorView view_ = TensorView(), + typename RandomFunc::Params random_ = RandomFunc::Params() + ): + view(view_), random(random_) { + + } + }; + + // + // Data members + // + + Params params; + RandomFunc random; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorFillRandomSparseMetaFunc(Params const ¶ms): params(params), random(params.random) { + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + params.view.at(coord) = random(); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomSparseMeta( + TensorView view, ///< destination tensor + uint64_t seed, ///< seed for RNG + int MetaSizeInBits = 2, ///< meta data size + cudaStream_t stream = nullptr) { + + using RandomFunc = detail::RandomSparseMetaFunc; + using Func = detail::TensorFillRandomUniformFunc; + using Params = typename Func::Params; + + typename RandomFunc::Params random(seed, MetaSizeInBits); + + TensorForEach( + view.extent(), + Params(view, random), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. 
+template +void BlockFillRandomSparseMeta( + Element *ptr, + size_t capacity, + uint64_t seed, ///< seed for RNG + int MetaSizeInBits = 2, ///< meta data size + cudaStream_t stream = nullptr) { + + using RandomFunc = detail::RandomSparseMetaFunc; + + typename RandomFunc::Params params(seed, MetaSizeInBits); + + BlockForEach(ptr, capacity, params, /*grid_size*/0, /*block_size*/0, stream); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Functor to fill a tensor with zeros off the diagonal and a uniform value on the diagonal. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillDiagonalFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Element diag; + Element other; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + Params( + TensorView view_ = TensorView(), + Element diag_ = Element(1), + Element other_ = Element(0) + ): + view(view_), diag(diag_), other(other_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorFillDiagonalFunc(Params const ¶ms): params(params) { + + } + + /// Updates the tensor + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + bool is_diag = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[i - 1]) { + is_diag = false; + break; + } + } + + params.view.at(coord) = (is_diag ? 
params.diag : params.other); + } +}; + +// Overwrites the elements of a tensor with a uniform value depending on fill mode +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillPartialFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Element element; + FillMode fill_mode; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params(): fill_mode(FillMode::kNone) { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + Params( + TensorView view_, + Element element_, + FillMode fill_mode_ + ): + view(view_), element(element_), fill_mode(fill_mode_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + CUTLASS_DEVICE + TensorFillPartialFunc(Params const ¶ms): params(params) { + + } + + /// Overwrites the element if it is within the covered region. 
+ CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + bool predicate = true; + + switch (params.fill_mode) { + case FillMode::kFull: + predicate = true; + break; + + case FillMode::kLower: + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i - 1] < coord[i]) { + predicate = false; + break; + } + } + break; + + case FillMode::kUpper: + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i - 1] > coord[i]) { + predicate = false; + break; + } + } + break; + + case FillMode::kDiagonal: + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i - 1] != coord[i]) { + predicate = false; + break; + } + } + break; + + case FillMode::kNone: // fall-through + + default: + predicate = false; + break; + } + + if (predicate) { + params.view.at(coord) = params.element; + } + } +}; + + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorClearPartialFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// + static_assert((Layout::kRank == 2), "TensorClearPartial is only supported for matrices"); + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Element element; + FillMode fill_mode; + int alignment; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params(): fill_mode(FillMode::kNone) { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + TensorView view_, + Element element_, + FillMode fill_mode_, + int alignment_ + ): + view(view_), element(element_), fill_mode(fill_mode_), alignment(alignment_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + CUTLASS_DEVICE + TensorClearPartialFunc(Params const ¶ms): params(params) { + + } + + /// Overwrites the element if it is within the covered region. + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + bool predicate = true; + + switch (params.fill_mode) { + + case FillMode::kLower: + if ((coord[0] >= coord[1]) || + ((coord[1] - coord[0]) >= params.alignment)) { + predicate = false; + break; + } + break; + + case FillMode::kUpper: + if ((coord[0] <= coord[1]) || + ((coord[0] - coord[1]) >= params.alignment)) { + predicate = false; + break; + } + break; + + case FillMode::kNone: // fall-through + + default: + predicate = false; + break; + } + + if (predicate) { + params.view.at(coord) = params.element; + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor everywhere with a unique value for its diagonal. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillDiagonal( + TensorView view, ///< destination tensor + Element diag = Element(1), ///< value to write in the diagonal + Element other = Element(0), ///< value to write off the diagonal + cudaStream_t stream = nullptr) { + + typedef detail::TensorFillDiagonalFunc Func; + typedef typename Func::Params Params; + + TensorForEach( + view.extent(), + Params(view, diag, other), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/// Fills a tensor partially depending on fill mode. Elements not covered by the fillmode are +/// not written. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillPartial( + TensorView view, ///< destination tensor + Element element, + FillMode fill_mode, + cudaStream_t stream = nullptr) { + + typedef detail::TensorFillPartialFunc Func; + typedef typename Func::Params Params; + + TensorForEach( + view.extent(), + Params(view, element, fill_mode), + stream + ); +} + +/// Clears a tensor partially depending on fill mode and alignment. Elements on the wrong-side +/// of fillmode (upto the alignment) are overwritten with the user supplied element (typically zeros) +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorClearPartial( + TensorView view, ///< destination tensor + Element element, + FillMode fill_mode, + int alignment, + cudaStream_t stream = nullptr) { + + typedef detail::TensorClearPartialFunc Func; + typedef typename Func::Params Params; + + TensorForEach( + view.extent(), + Params(view, element, fill_mode, alignment), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with a uniform value +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFill( + TensorView view, ///< destination tensor + Element val = Element(0), ///< value to uniformly fill it with + cudaStream_t stream = nullptr) { + + TensorFillDiagonal(view, val, val, stream); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor's diagonal with 1 and 0 everywhere else. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillIdentity( + TensorView view, ///< destination tensor + cudaStream_t stream = nullptr) { + + TensorFillDiagonal(view, Element(1), Element(0), stream); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorUpdateDiagonalFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Element diag; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + TensorView view_ = TensorView(), + Element diag_ = Element(1) + ): + view(view_), diag(diag_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorUpdateDiagonalFunc(Params const ¶ms): params(params) { + + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + bool is_diag = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[i - 1]) { + is_diag = false; + break; + } + } + + if (is_diag) { + params.view.at(coord) = params.diag; + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorUpdateDiagonal( + TensorView view, ///< destination tensor + Element diag = Element(1), + cudaStream_t stream = nullptr) { + + typedef detail::TensorUpdateDiagonalFunc Func; + typedef typename Func::Params Params; + + TensorForEach( + view.extent(), + Params(view, diag), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorUpdateOffDiagonalFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + struct 
Params { + + // + // Data members + // + + TensorView view; + Element other; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + Params( + TensorView view_ = TensorView(), + Element other_ = Element(0) + ): + view(view_), other(other_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorUpdateOffDiagonalFunc(Params const ¶ms): params(params) { + + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + bool is_diag = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[i - 1]) { + is_diag = false; + break; + } + } + + if (!is_diag) { + params.view.at(coord) = params.other; + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Writes a uniform value to all elements in the tensor without modifying diagonal elements. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorUpdateOffDiagonal( + TensorView view, ///< destination tensor + Element other = Element(1), + cudaStream_t stream = nullptr) { + + typedef detail::TensorUpdateOffDiagonalFunc Func; + typedef typename Func::Params Params; + + TensorForEach( + view.extent(), + Params(view, other), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillLinearFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Array v; + Element s; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + TensorView view_, ///< destination tensor + Array const & v_, + Element s_ = Element(0) + ): + view(view_), v(v_), s(s_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorFillLinearFunc(Params const ¶ms): params(params) { + + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + Element sum = params.s; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Layout::kRank; ++i) { + if constexpr (is_complex::value) { + if constexpr (sizeof_bits::value <= 32) { + sum = Element(static_cast>(sum) + + static_cast>(params.v[i]) * static_cast>(coord[i])); + } + } + else if constexpr (sizeof_bits::value <= 32) { + if constexpr (std::numeric_limits::is_integer) { + sum = Element(static_cast(sum) + + static_cast(params.v[i]) * static_cast(coord[i])); + } + else { + sum = Element(static_cast(sum) + + static_cast(params.v[i]) * static_cast(coord[i])); + } + } + else { + sum += params.v[i] * coord[i]; + } + } + + params.view.at(coord) = sum; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills tensor with a linear combination of its coordinate and another vector +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillLinear( + TensorView view, ///< destination tensor + Array const & v, + Element s = Element(0), + cudaStream_t stream = nullptr) { + + using Func = detail::TensorFillLinearFunc; + using Params = typename Func::Params; + + TensorForEach( + view.extent(), + Params(view, v, s), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + 
+/// Fills a tensor with random values from a distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandom( + TensorView view, ///< destination tensor + uint64_t seed, + Distribution dist, + cudaStream_t stream = nullptr) { + + using Real = typename RealType::Type; + + if (dist.kind == Distribution::Gaussian) { + TensorFillRandomGaussian( + view, + seed, + static_cast(dist.gaussian.mean), + static_cast(dist.gaussian.stddev), + dist.int_scale, + stream); + } else if (dist.kind == Distribution::Uniform) { + TensorFillRandomUniform( + view, + seed, + static_cast(dist.uniform.max), + static_cast(dist.uniform.min), + dist.int_scale, + stream); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillSequential( + Element *ptr, + int64_t capacity, + Element v = Element(1), + Element s = Element(0)) { + + using Layout = layout::PackedVectorLayout; + Layout::TensorCoord size(static_cast(capacity)); // -Wconversion + Layout layout = Layout::packed(size); + TensorView view(ptr, layout, size); + + Array c; + c[0] = v; + + TensorFillLinear(view, c, s); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillRandom( + Element *ptr, + size_t capacity, + uint64_t seed, + Distribution dist, + cudaStream_t stream = nullptr) { + + using Real = typename RealType::Type; + + if (dist.kind == Distribution::Gaussian) { + BlockFillRandomGaussian( + ptr, + capacity, + seed, + static_cast(dist.gaussian.mean), + 
static_cast(dist.gaussian.stddev), + dist.int_scale, + stream); + } + else if (dist.kind == Distribution::Uniform) { + BlockFillRandomUniform( + ptr, + capacity, + seed, + static_cast(dist.uniform.max), + static_cast(dist.uniform.min), + dist.int_scale, + stream); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorCopyDiagonalInFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Element const *ptr; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. 
+ Params( + TensorView view_, ///< destination tensor + Element const *ptr_ + ): + view(view_), ptr(ptr_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorCopyDiagonalInFunc(Params const ¶ms): params(params) { + + } + + /// Only update the diagonal element + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + bool is_diagonal = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[0]) { + is_diagonal = false; + } + } + if (is_diagonal) { + params.view.at(coord) = params.ptr[coord[0]]; + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies a diagonal in from host memory without modifying off-diagonal elements. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorCopyDiagonalIn( + TensorView view, ///< destination tensor + Element const *ptr, ///< dense buffer of elements + cudaStream_t stream = nullptr) { + + using Func = detail::TensorCopyDiagonalInFunc; + using Params = typename Func::Params; + + TensorForEach( + view.extent(), + Params(view, ptr), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + + +namespace detail { + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorCopyDiagonalOutFunc { + + /// View type + using TensorView = TensorView; + + /// Scalar type + typedef typename TensorView::Element T; + + /// Coordinate in tensor's index space + typedef typename TensorView::TensorCoord TensorCoord; + + /// Parameters structure + 
struct Params { + + // + // Data members + // + + TensorView view; + Element *ptr; + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + Params( + TensorView view_, ///< destination tensor + Element *ptr_ + ): + view(view_), ptr(ptr_) { + + } + }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Device-side initialization of RNG + CUTLASS_DEVICE + TensorCopyDiagonalOutFunc(Params const ¶ms): params(params) { + + } + + /// Compute random value and update RNG state + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + bool is_diagonal = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[0]) { + is_diagonal = false; + } + } + if (is_diagonal) { + params.ptr[coord[0]] = params.view.at(coord); + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies the diagonal of a tensor into a dense buffer in host memory. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorCopyDiagonalOut( + Element *ptr, ///< dense buffer of elements + TensorView view, ///< source tensor + cudaStream_t stream = nullptr) { + + using Func = detail::TensorCopyDiagonalOutFunc; + using Params = typename Func::Params; + + TensorForEach( + view.extent(), + Params(view, ptr), + /*grid_size*/0, /*block_size*/0, + stream + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_foreach.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_foreach.h new file mode 100644 index 0000000000000000000000000000000000000000..bae68e7037c43afffac9028f199f1ad462ab142a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_foreach.h @@ -0,0 +1,144 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include +#include "cutlass/cutlass.h" +#include "cutlass/util/reference/device/kernel/tensor_foreach.h" + +namespace cutlass { +namespace reference { +namespace device { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Launches a kernel calling a functor for each element in a tensor's index space. +template +struct TensorForEach { + + /// Constructor performs the operation. 
+ TensorForEach( + Coord size, Params params = Params(), + int grid_size = 0, int block_size = 0, + cudaStream_t stream = nullptr) { + + if (!grid_size || !block_size) { + + // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API + cudaError_t result = cudaOccupancyMaxPotentialBlockSize( + &grid_size, + &block_size, + reinterpret_cast(kernel::TensorForEach)); + + if (result != cudaSuccess) { + throw std::runtime_error("Failed to query occupancy."); + } + + // Limit block size. This has the effect of increasing the number of items processed by a + // single thread and reduces the impact of initialization overhead. + block_size = (block_size < 128 ? block_size : 128); + } + + dim3 grid(grid_size, 1, 1); + dim3 block(block_size, 1, 1); + + kernel::TensorForEach<<< grid, block, 0, stream >>>(size, params); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Launches a kernel calling a functor for each element along a tensor's diagonal +template +struct TensorDiagonalForEach { + + /// Constructor performs the operation + TensorDiagonalForEach( + Coord size, Params params = Params(), + int start = 0, int end = -1, + int block_size = 128, cudaStream_t stream = nullptr) { + + if (end < 0) { + end = size.min(); + } + + dim3 block(block_size, 1, 1); + dim3 grid((end - start + block_size - 1) / block_size, 1, 1); + + kernel::TensorDiagonalForEach<<< grid, block, 0, stream >>>( + size, params, start, end); + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct BlockForEach { + + /// Constructor performs the operation. 
+ BlockForEach( + Element *ptr, + size_t capacity, + typename Func::Params params = typename Func::Params(), + int grid_size = 0, + int block_size = 0, + cudaStream_t stream = nullptr) { + + if (!grid_size || !block_size) { + + // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API + cudaError_t result = cudaOccupancyMaxPotentialBlockSize( + &grid_size, + &block_size, + reinterpret_cast(kernel::BlockForEach)); + + if (result != cudaSuccess) { + throw std::runtime_error("Failed to query occupancy."); + } + + // Limit block size. This has the effect of increasing the number of items processed by a + // single thread and reduces the impact of initialization overhead. + block_size = (block_size < 128 ? block_size : 128); + } + + dim3 grid(grid_size, 1, 1); + dim3 block(block_size, 1, 1); + + kernel::BlockForEach<<< grid, block, 0, stream >>>(ptr, capacity, params); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..09c11db404e7ad4c774c4fa646165ea95453bdd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h @@ -0,0 +1,510 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/util/device_memory.h" +#include "cutlass/util/reference/detail/linear_to_coordinate.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp, + int kBlockSize = 128 +> +__global__ void TensorTransformReducePartial( + TensorView view, /// View of the tensor to reduce over + ComputeType identity, /// Identity element of the reduction operation + ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType + TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType + ComputeType *workspace) { /// Device-side workspace for accumulating partial results. 
The reduced element is stored in workspace[0] + + int64_t idx = threadIdx.x + blockIdx.x * blockDim.x; + int64_t size = view.size(); + + __shared__ ComputeType scratchpad[kBlockSize]; + + for (; idx < size; idx += blockDim.x * gridDim.x) { + + // Map linear thread ID onto tensor coordinate + typename Layout::TensorCoord coord; + + cutlass::reference::detail::LinearToCoordinate()(coord, idx, view.extent()); + + if (view.contains(coord)) { + + // Fetch element + Element x = view.at(coord); + + // Transform + identity = reduce(identity, transform(x)); + } + } + + scratchpad[threadIdx.x] = identity; + + __syncthreads(); + + // One thread performs the final reduction and stores out. This could be enhanced via + // a tree reduction and pipelining. + if (threadIdx.x == 0) { + + for (int i = 1; i < kBlockSize; ++i) { + identity = reduce(identity, scratchpad[i]); + } + + workspace[blockIdx.x] = identity; + } +} + +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp, + int kBlockSize = 128 +> +__global__ void TensorTransformReducePartial( + TensorView view_A, /// View of the tensor to reduce over + TensorView view_B, /// View of the tensor to reduce over + ComputeType identity, /// Identity element of the reduction operation + ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType + TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType + ComputeType *workspace) { /// Device-side workspace for accumulating partial results. 
The reduced element is stored in workspace[0] + + int64_t idx = threadIdx.x + blockIdx.x * blockDim.x; + int64_t size = view_A.size(); + + __shared__ ComputeType scratchpad[kBlockSize]; + + for (; idx < size; idx += blockDim.x * gridDim.x) { + + // Map linear thread ID onto tensor coordinate + typename Layout::TensorCoord coord; + + cutlass::reference::detail::LinearToCoordinate()(coord, idx, view_A.extent()); + + if (view_A.contains(coord)) { + + // Fetch element + Element a = view_A.at(coord); + Element b = view_B.at(coord); + + // Transform + identity = reduce(identity, transform(a, b)); + } + } + + scratchpad[threadIdx.x] = identity; + + __syncthreads(); + + // One thread performs the final reduction and stores out. This could be enhanced via + // a tree reduction and pipelining. + if (threadIdx.x == 0) { + + for (int i = 1; i < kBlockSize; ++i) { + identity = reduce(identity, scratchpad[i]); + } + + workspace[blockIdx.x] = identity; + } +} + + +template < + typename ComputeType, + typename ReduceOp, + int kBlockSize = 32 +> +__global__ void TensorTransformReduceFinalize( + ComputeType *workspace, + ComputeType identity, + int workspace_size, + ReduceOp reduce) { + + __shared__ ComputeType scratchpad[kBlockSize]; + + for (int idx = threadIdx.x; idx < workspace_size; idx += kBlockSize) { + identity = reduce(identity, workspace[idx]); + } + + scratchpad[threadIdx.x] = identity; + + __syncthreads(); + + if (threadIdx.x == 0) { + + for (int i = 1; i < kBlockSize; ++i) { + identity = reduce(identity, scratchpad[i]); + } + + workspace[0] = identity; + } +} + +} // namespace kernel + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Transform-reduce operation over the elements of a tensor +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorView view, /// View of the tensor to reduce over + ComputeType 
identity, /// Identity element of the reduction operation + ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType + TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType + ComputeType *workspace, /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0] + int workspace_size, /// Number of elements in workspace + cudaStream_t stream = nullptr, /// CUDA stream to launch into + bool copy_out = true /// If true, the value of workspace[0] is copied to host and returned. Otherwise, `identity` is returned. +) { + + int const kBlockSize = 128; + + dim3 block(kBlockSize, 1); + dim3 grid(workspace_size, 1); + + kernel::TensorTransformReducePartial< + Element, Layout, ComputeType, ReduceOp, TransformOp, kBlockSize + ><<< grid, block, 0, stream >>>( + view, identity, reduce, transform, workspace + ); + + int const kFinalizeBlockSize = 32; + + kernel::TensorTransformReduceFinalize< + ComputeType, ReduceOp, kFinalizeBlockSize + ><<< dim3(1, 1), dim3(kFinalizeBlockSize, 1), 0, stream >>>( + workspace, identity, workspace_size, reduce + ); + + if (copy_out) { + cudaError_t result = cudaMemcpy(&identity, workspace, sizeof(identity), cudaMemcpyDeviceToHost); + if (result != cudaSuccess) { + throw std::runtime_error("cudaMemcpy() failed"); + } + } + + return identity; +} + +/// Transform-reduce operation over the elements of two tensors, zipped together +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorView view_A, /// View of the tensor to reduce over + TensorView view_B, /// View of the tensor to reduce over + ComputeType identity, /// Identity element of the reduction operation + ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType + 
TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType + ComputeType *workspace, /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0] + int workspace_size, /// Number of elements in workspace + cudaStream_t stream = nullptr, /// CUDA stream to launch into + bool copy_out = true /// If true, the value of workspace[0] is copied to host and returned. Otherwise, `identity` is returned. +) { + + if (view_A.extent() != view_B.extent()) { + throw std::runtime_error("Extents must be equal."); + } + + int const kBlockSize = 128; + + dim3 block(kBlockSize, 1); + dim3 grid(workspace_size, 1); + + kernel::TensorTransformReducePartial< + Element, Layout, ComputeType, ReduceOp, TransformOp, kBlockSize + ><<< grid, block, 0, stream >>>( + view_A, view_B, identity, reduce, transform, workspace + ); + + int const kFinalizeBlockSize = 32; + + kernel::TensorTransformReduceFinalize< + ComputeType, ReduceOp, kFinalizeBlockSize + ><<< dim3(1, 1), dim3(kFinalizeBlockSize, 1), 0, stream >>>( + workspace, identity, workspace_size, reduce + ); + + if (copy_out) { + cudaError_t result = cudaMemcpy(&identity, workspace, sizeof(identity), cudaMemcpyDeviceToHost); + if (result != cudaSuccess) { + throw std::runtime_error("cudaMemcpy() failed"); + } + } + + return identity; +} + +/// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side +/// workspace +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorView view, + ComputeType identity, + ReduceOp reduce, + TransformOp transform, + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + // Optionally query for the SM count to size the workspace. 
+ if (!workspace_size) { + + int device_idx = 0; + cudaDeviceProp prop; + + cudaError_t result = cudaGetDevice(&device_idx); + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() failed"); + } + + result = cudaGetDeviceProperties(&prop, device_idx); + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProp() failed"); + } + + workspace_size = int(prop.multiProcessorCount); + } + + DeviceAllocation workspace(workspace_size); + + ComputeType output = TensorTransformReduce( + view, + identity, + reduce, + transform, + workspace.get(), + workspace_size, + stream, + true); + + return output; +} + + +/// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side +/// workspace +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorView view_A, + TensorView view_B, + ComputeType identity, + ReduceOp reduce, + TransformOp transform, + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + // Optionally query for the SM count to size the workspace. 
+ if (!workspace_size) { + + int device_idx = 0; + cudaDeviceProp prop; + + cudaError_t result = cudaGetDevice(&device_idx); + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDevice() failed"); + } + + result = cudaGetDeviceProperties(&prop, device_idx); + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetDeviceProp() failed"); + } + + workspace_size = int(prop.multiProcessorCount); + } + + DeviceAllocation workspace(workspace_size); + + ComputeType output = TensorTransformReduce( + view_A, + view_B, + identity, + reduce, + transform, + workspace.get(), + workspace_size, + stream, + true); + + return output; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper to compute the sum of the elements of a tensor +template < + typename Element, + typename Layout, + typename ComputeType = Element +> +ComputeType TensorSum( + TensorView view, + ComputeType identity = ComputeType(), + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + plus reduce; + NumericConverter transform; + + return TensorTransformReduce( + view, identity, reduce, transform, stream, workspace_size); +} + +/// Helper to compute the sum of the squares of the elements of a tensor +template < + typename Element, + typename Layout, + typename ComputeType = Element +> +ComputeType TensorSumSq( + TensorView view, + ComputeType identity = ComputeType(), + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + plus reduce; + magnitude_squared transform; + + return TensorTransformReduce( + view, identity, reduce, transform, stream, workspace_size); +} + +/// Helper to compute the norm of the elements of a tensor. 
+template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorNorm( + TensorView view, + ComputeType identity = ComputeType(), + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + return std::sqrt(TensorSumSq(view, identity, stream, workspace_size)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper to compute the sum of the squares of the differences of two tensors +template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorSumSqDiff( + TensorView view_A, + TensorView view_B, + ComputeType identity = ComputeType(), + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + plus reduce; + magnitude_squared_difference transform; + + return TensorTransformReduce( + view_A, view_B, identity, reduce, transform, stream, workspace_size); +} + + +/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory +template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorNormDiff( + TensorView view_A, + TensorView view_B, + ComputeType identity = ComputeType(), + cudaStream_t stream = nullptr, + int workspace_size = 0 +) { + + return std::sqrt(TensorSumSqDiff(view_A, view_B, identity, stream, workspace_size)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_relu.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_relu.h new file mode 100644 index 
0000000000000000000000000000000000000000..c78f1dc95d9394f0d76600393fbe20397506a4d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_relu.h @@ -0,0 +1,141 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/* \file + \brief Defines device-side elementwise operations on TensorView. Note, the operations defined + in this header are not specialized for any particular data layout and are therefore not + intended to offer the best possible performance. Rather, they are intended to be generic + reference implementations to support the CUTLASS unit tests. +*/ + +#pragma once + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/tensor_view.h" + +#include "cutlass/util/reference/device/tensor_foreach.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace device { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorReLuFunc { + + /// View type + using TensorView = TensorView; + + /// Coordinate in tensor's index space + using TensorCoord = typename TensorView::TensorCoord; + + /// Parameters structure + struct Params { + + // + // Data members + // + + TensorView view; + Element threshold; + + + // + // Methods + // + + Params( + TensorView view_ = TensorView(), + Element threshold_ = Element(0) + ): + view(view_), threshold(threshold_) { + + } + }; + + // + // Data members + // + + Params params; + + // + // Methods + // + + CUTLASS_DEVICE + TensorReLuFunc(Params const ¶ms): params(params) { + + } + + CUTLASS_DEVICE + void operator()(TensorCoord const &coord) { + + Element const & value = params.view.at(coord); + params.view.at(coord) = (value < params.threshold) ? 
params.threshold : value; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Apply ReLu on a tensor +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorReLu( + TensorView view, ///< destination tensor + Element threshold = Element(0)) { ///< ReLu threshold + + using Func = detail::TensorReLuFunc; + using Params = typename Func::Params; + + TensorForEach( + view.extent(), + Params(view, threshold) + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/thread/gemm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/thread/gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..094f716c3f390b87a02d70d45645e5e5e936419b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/thread/gemm.h @@ -0,0 +1,186 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GEMM in host-side code. +*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace reference { +namespace device { +namespace thread { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Thread-level blocked general matrix product. +// +// Note, this is a reference implementation. Performance is not expected to approach peak. 
+// +template < + typename TensorRefA, + typename TensorRefB, + typename TensorRefC, + typename ScalarType, + typename AccumulatorType, + typename OutputTile, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +struct Gemm { + + using ElementA = typename TensorRefA::Element; + using ElementB = typename TensorRefB::Element; + using ElementC = typename TensorRefC::Element; + + // + // Data members + // + + /// Tile for A operand + ElementA A_tile[OutputTile::kColumn]; + + /// Tile for B operand + ElementB B_tile[OutputTile::kRow]; + + /// Tile for Accumulator + AccumulatorType accum[OutputTile::kColumn][OutputTile::kRow]; + + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Gemm(AccumulatorType initial_accum = AccumulatorType(0)) { + + // Clear fetch registers + for (int i = 0; i < OutputTile::kColumn; ++i) { + A_tile[i] = ElementA(0); + } + + for (int j = 0; j < OutputTile::kColumn; ++j) { + B_tile[j] = ElementB(0); + } + + // Clear accumulators + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < OutputTile::kColumn; ++j) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < OutputTile::kRow; ++i) { + accum[j][i] = initial_accum; + } + } + } + + /// Computes a matrix product + CUTLASS_HOST_DEVICE + Gemm & multiply_add( + gemm::GemmCoord problem_size, + TensorRefA tensor_a, + TensorRefB tensor_b, + MatrixCoord output_coord = MatrixCoord()) { + + InnerProductOp inner_product_op; + + // Loop over the GEMM K dimension + CUTLASS_PRAGMA_NO_UNROLL + for (int k = 0; k < problem_size.k(); ++k) { + + // Fetch a slice of the A matrix + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < OutputTile::kColumn; ++i) { + if (output_coord.row() + i < problem_size.m()) { + A_tile[i] = tensor_a.at(make_Coord(output_coord.row() + i, k)); + } + } + + // Fetch a slice of the B matrix + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < OutputTile::kRow; ++j) { + if (output_coord.column() + j < problem_size.n()) { + B_tile[j] = tensor_b.at(make_Coord(k, 
output_coord.column() + j)); + } + } + + // Compute an accumulated matrix product + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < OutputTile::kRow; ++j) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < OutputTile::kColumn; ++i) { + accum[j][i] = inner_product_op(A_tile[i], B_tile[j], accum[j][i]); + } + } + } + + return *this; + } + + /// Performs linear scaling of matrix product and updates output tensor + CUTLASS_HOST_DEVICE + Gemm & epilogue( + gemm::GemmCoord problem_size, + ScalarType alpha, + ScalarType beta, + TensorRefC tensor_c, + TensorRefC tensor_d, + MatrixCoord output_coord = MatrixCoord()) { + + ConvertOp convert_op; + + // Update the output tensor + for (int j = 0; j < OutputTile::kRow; ++j) { + for (int i = 0; i < OutputTile::kColumn; ++i) { + MatrixCoord coord = output_coord + MatrixCoord(i, j); + if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) { + + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[j][i]) + + beta * ScalarType(tensor_c.at(coord)) + ); + } + } + } + + return *this; + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace device +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/convolution.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..64c1cd9bd70f53a2bf98c2a9d46700fd184bfebf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/convolution.h @@ -0,0 +1,797 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Reference implementation for convolution in host-side code. 
+*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/functional.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/conv/convolution.h" +#include "cutlass/conv/conv2d_problem_size.h" +#include "cutlass/conv/conv3d_problem_size.h" +#include + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// Forward propagation +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// y = conv2d(x, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv2dFprop( + conv::Conv2dProblemSize problem_size, + TensorRef tensor_x, + TensorRef tensor_w, + TensorRef tensor_y_in, + TensorRef tensor_y_out, + ElementCompute alpha, + ElementCompute beta) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + // Apply MMA and accumulate ElementAccumulator + for (int n = 0; n < problem_size.N; ++n) { + for (int p = 0; p < problem_size.P; ++p) { + for (int q = 0; q < problem_size.Q; ++q) { + for (int k = 0; k < problem_size.K; ++k) { + + int group_idx = k / (problem_size.K / problem_size.groups); + int channels_per_group = problem_size.C / problem_size.groups; + + ElementAccumulator acc = ElementAccumulator(); + + for (int r = 0; r < problem_size.R; ++r) { + for (int s = 0; s < problem_size.S; ++s) { + for (int c = 0; c < channels_per_group; ++c) { + + int filter_r = r; + int filter_s = s; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_r = 
problem_size.R - 1 - r; + filter_s = problem_size.S - 1 - s; + } + + int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h; + int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w; + + if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) { + + ElementA a = tensor_x.at({n, h, w, c + group_idx * channels_per_group}); + ElementB b = tensor_w.at({k, r, s, c}); + + acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc); + + } + } + } + } + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = ElementC(); + + if (beta != ElementCompute()) { + c_ref = tensor_y_in.at(cutlass::make_Coord(n, p, q, k)); + } + + tensor_y_out.at(cutlass::make_Coord(n, p, q, k)) = + convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref)); + } + } + } + } +} + +/// Depthwise-separable convolution +template , + typename InnerProductOp = multiply_add> +void Depsep_Fprop(cutlass::TensorView tensor_A, + cutlass::TensorView tensor_B, + cutlass::TensorView tensor_C, + cutlass::TensorView tensor_D, + ElementCompute alpha, + ElementCompute beta, + cutlass::Tensor4DCoord padding = cutlass::Tensor4DCoord(), + cutlass::Coord<2> conv_stride = cutlass::Coord<2>(), + cutlass::Coord<2> dilation = cutlass::Coord<2>(), + cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + // Apply MMA and accumulate ElementAccumulator + for (int n = 0; n < tensor_C.extent().n(); ++n) { + for (int p = 0; p < tensor_C.extent().h(); ++p) { + for (int q = 0; q < tensor_C.extent().w(); ++q) { + for (int g = 0; g < tensor_C.extent().c(); ++g) { + ElementAccumulator acc = ElementAccumulator(); + for (int r = 0; r < tensor_B.extent().h(); ++r) { + for (int s = 0; s < tensor_B.extent().w(); ++s) { + + // input activation H and W + int h = p * conv_stride[0] - padding[0] + r * dilation[0]; + int w = q 
* conv_stride[1] - padding[2] + s * dilation[1]; + + if (h < tensor_A.extent().h() && h >= 0 && w < tensor_A.extent().w() && w >= 0) { + ElementA a = tensor_A.at(cutlass::make_Coord(n, h, w, g)); + + ElementB b = (mode == cutlass::conv::Mode::kCrossCorrelation) + ? tensor_B.at(cutlass::make_Coord(g, r, s, 0)) + : tensor_B.at(cutlass::make_Coord( + g, tensor_B.extent().h() - r - 1, tensor_B.extent().w() - s - 1, 0)); + + acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc); + } + } + } + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = tensor_C.at(cutlass::make_Coord(n, p, q, g)); + tensor_D.at(cutlass::make_Coord(n, p, q, g)) = + convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref)); + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// Dgrad +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// dx = dgrad(dy, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv2dDgrad( + cutlass::conv::Conv2dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_w, + TensorRef tensor_dx_in, + TensorRef tensor_dx_out, + ElementCompute alpha, + ElementCompute beta) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + // Apply MMA and accumulate ElementAccumulator + for (int n = 0; n < problem_size.N; ++n) { + for (int h = 0; h < problem_size.H; ++h) { + for (int w = 0; w < problem_size.W; ++w) { + for (int c = 0; c < problem_size.C; ++c) { + + ElementAccumulator acc = ElementAccumulator(); + + for (int r = 0; r < problem_size.R; ++r) { + for (int s = 0; s < 
problem_size.S; ++s) { + for (int k = 0; k < problem_size.K; ++k) { + + int filter_r = r; + int filter_s = s; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_r = problem_size.R - 1 - r; + filter_s = problem_size.S - 1 - s; + } + + int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h; + int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w; + + if (p >= 0 && (p % problem_size.stride_h) == 0 && + q >= 0 && (q % problem_size.stride_w) == 0) { + + p = p / problem_size.stride_h; + q = q / problem_size.stride_w; +#if 0 + std::cout << "row:" + << n * problem_size.H * problem_size.W + + h * problem_size.W + + w << " " + << "n, p, q: (" + << n << ", " + << p << ", " + << q << ") * " + << "r, s: (" + << r << ", " + << s << ") [" + << ((p < problem_size.P && q < problem_size.Q) ? "true":"false") << "]" + << std::endl; +#endif + if (p < problem_size.P && q < problem_size.Q) { + + ElementA a = tensor_dy.at(cutlass::make_Coord(n, p, q, k)); + ElementB b = tensor_w.at(cutlass::make_Coord(k, r, s, c)); + + acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc); + } + } + + } // for (K) + } // for (S) + } // for (R) + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = ElementC(); + + if (beta != ElementCompute()) { + c_ref = tensor_dx_in.at(cutlass::make_Coord(n, h, w, c)); + } + + tensor_dx_out.at(cutlass::make_Coord(n, h, w, c)) = + convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref)); + + } // for (C) + } // for (W) + } // for (H) + } // for (N) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// Wgrad +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// dw = wgrad(dy, x) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + 
typename ElementAccumulator = ElementCompute, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv2dWgrad( + cutlass::conv::Conv2dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_x, + TensorRef tensor_dw_in, + TensorRef tensor_dw_out, + ElementCompute alpha, + ElementCompute beta) { + + InnerProductOp inner_product_op; + ConvertOp convert_op; + + // Apply MMA and accumulate ElementAccumulator + for (int k = 0; k < problem_size.K; ++k) { + for (int r = 0; r < problem_size.R; ++r) { + for (int s = 0; s < problem_size.S; ++s) { + for (int c = 0; c < problem_size.C; ++c) { + + ElementAccumulator acc = ElementAccumulator(); + + for (int n = 0; n < problem_size.N; ++n) { + for (int p = 0; p < problem_size.P; ++p) { + for (int q = 0; q < problem_size.Q; ++q) { + + cutlass::Tensor4DCoord b_coord; + + int filter_r = r; + int filter_s = s; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_r = problem_size.R - 1 - r; + filter_s = problem_size.S - 1 - s; + } + + b_coord = make_Coord( + n, + p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h, + q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w, + c); + + if (b_coord.h() < problem_size.H && b_coord.h() >= 0 && + b_coord.w() < problem_size.W && b_coord.w() >= 0) { + + ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, p, q, k))); + ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord)); + acc = inner_product_op(a, b, acc); + } + } + } + } + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = ElementC(); + + if (beta != ElementCompute()) { + c_ref = tensor_dw_in.at(cutlass::make_Coord(k, r, s, c)); + } + + tensor_dw_out.at(cutlass::make_Coord(k, r, s, c)) = + convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref)); + + } // for (C) + } // for (S) + } // for 
(R) + } // for (K) +} + +/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv2d( + conv::Operator convolutional_operator, + conv::Conv2dProblemSize problem_size, + TensorRef tensor_A, + TensorRef tensor_B, + TensorRef tensor_C, + TensorRef tensor_D, + ElementCompute alpha, + ElementCompute beta) { + + switch (convolutional_operator) { + case conv::Operator::kFprop: + Conv2dFprop< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ElementD, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta); + break; + + case conv::Operator::kDgrad: + Conv2dDgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ElementD, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta); + break; + + case conv::Operator::kWgrad: + Conv2dWgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ElementD, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta); + break; + + default: + break; + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// 3D convolution +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// y = conv3d(x, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = 
ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv3dFprop( + conv::Conv3dProblemSize problem_size, + TensorRef tensor_x, + TensorRef tensor_w, + TensorRef tensor_y_in, + TensorRef tensor_y_out, + ElementCompute alpha, + ElementCompute beta) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + // Apply MMA and accumulate ElementAccumulator + for (int n = 0; n < problem_size.N; ++n) { + for (int z = 0; z < problem_size.Z; ++z) { + for (int p = 0; p < problem_size.P; ++p) { + for (int q = 0; q < problem_size.Q; ++q) { + for (int k = 0; k < problem_size.K; ++k) { + + ElementAccumulator acc = ElementAccumulator(); + + for (int t = 0; t < problem_size.T; ++t) { + for (int r = 0; r < problem_size.R; ++r) { + for (int s = 0; s < problem_size.S; ++s) { + for (int c = 0; c < problem_size.C; ++c) { + + int filter_t = t; + int filter_r = r; + int filter_s = s; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_t = problem_size.T - 1 - t; + filter_r = problem_size.R - 1 - r; + filter_s = problem_size.S - 1 - s; + } + + int d = z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d; + int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h; + int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w; + + if (d >= 0 && d < problem_size.D && + h >=0 && h < problem_size.H && + w >= 0 && w < problem_size.W) { + + ElementA a = tensor_x.at({n, d, h, w, c}); + ElementB b = tensor_w.at({k, t, r, s, c}); + + acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc); + } + } + } + } + } + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = ElementC(); + + if (beta != ElementCompute()) { + c_ref = tensor_y_in.at(cutlass::make_Coord(n, z, p, q, k)); + } + + tensor_y_out.at(cutlass::make_Coord(n, z, p, q, k)) = + convert_op(alpha * 
ElementCompute(acc) + beta * ElementCompute(c_ref)); + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// Dgrad +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// dx = dgrad(dy, w) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv3dDgrad( + cutlass::conv::Conv3dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_w, + TensorRef tensor_dx_in, + TensorRef tensor_dx_out, + ElementCompute alpha, + ElementCompute beta) { + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + // Apply MMA and accumulate ElementAccumulator + for (int n = 0; n < problem_size.N; ++n) { + for (int d = 0; d < problem_size.D; ++d) { + for (int h = 0; h < problem_size.H; ++h) { + for (int w = 0; w < problem_size.W; ++w) { + for (int c = 0; c < problem_size.C; ++c) { + + ElementAccumulator acc = ElementAccumulator(); + + for (int t = 0; t < problem_size.T; ++t) { + for (int r = 0; r < problem_size.R; ++r) { + for (int s = 0; s < problem_size.S; ++s) { + for (int k = 0; k < problem_size.K; ++k) { + + int filter_t = t; + int filter_r = r; + int filter_s = s; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_t = problem_size.T - 1 - t; + filter_r = problem_size.R - 1 - r; + filter_s = problem_size.S - 1 - s; + } + + int z = d + problem_size.pad_d - filter_t * problem_size.dilation_d; + int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h; + int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w; + + if (z >= 0 && (z % problem_size.stride_d) == 0 && + p >= 0 && (p % problem_size.stride_h) == 0 && + q >= 0 && (q % problem_size.stride_w) 
== 0) { + + z = z / problem_size.stride_d; + p = p / problem_size.stride_h; + q = q / problem_size.stride_w; + + if (z < problem_size.Z && p < problem_size.P && q < problem_size.Q) { + + ElementA a = tensor_dy.at(cutlass::make_Coord(n, z, p, q, k)); + ElementB b = tensor_w.at(cutlass::make_Coord(k, t, r, s, c)); + + acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc); + } + } + + } // for (K) + } // for (S) + } // for (R) + } // for (T) + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = ElementC(); + + if (beta != ElementCompute()) { + c_ref = tensor_dx_in.at(cutlass::make_Coord(n, d, h, w, c)); + } + + tensor_dx_out.at(cutlass::make_Coord(n, d, h, w, c)) = + convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref)); + + } // for (C) + } // for (W) + } // for (H) + } // for (D) + } // for (N) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// Wgrad +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// dw = wgrad(dy, x) +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv3dWgrad( + cutlass::conv::Conv3dProblemSize problem_size, + TensorRef tensor_dy, + TensorRef tensor_x, + TensorRef tensor_dw_in, + TensorRef tensor_dw_out, + ElementCompute alpha, + ElementCompute beta) { + + InnerProductOp inner_product_op; + ConvertOp convert_op; + + // Apply MMA and accumulate ElementAccumulator + for (int k = 0; k < problem_size.K; ++k) { + for (int t = 0; t < problem_size.T; ++t) { + for (int r = 0; r < problem_size.R; ++r) { + for (int s = 0; s < problem_size.S; ++s) { + for (int c = 0; c < problem_size.C; ++c) { + + ElementAccumulator acc 
= ElementAccumulator(); + + for (int n = 0; n < problem_size.N; ++n) { + for (int z = 0; z < problem_size.Z; ++z) { + for (int p = 0; p < problem_size.P; ++p) { + for (int q = 0; q < problem_size.Q; ++q) { + + int filter_t = t; + int filter_r = r; + int filter_s = s; + + if (problem_size.mode == cutlass::conv::Mode::kConvolution) { + filter_t = problem_size.T - 1 - t; + filter_r = problem_size.R - 1 - r; + filter_s = problem_size.S - 1 - s; + } + + Tensor5DCoord b_coord = make_Coord( + n, + z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d, + p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h, + q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w, + c); + + if (b_coord.d() < problem_size.D && b_coord.d() >= 0 && + b_coord.h() < problem_size.H && b_coord.h() >= 0 && + b_coord.w() < problem_size.W && b_coord.w() >= 0) { + + ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, z, p, q, k))); + ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord)); + + acc = inner_product_op(a, b, acc); + } + } + } + } + } + + // Apply Epilogue, compute ElementCompute, convert and store ElementC + ElementC c_ref = ElementC(); + + if (beta != ElementCompute()) { + c_ref = tensor_dw_in.at(cutlass::make_Coord(k, t, r, s, c)); + } + + tensor_dw_out.at(cutlass::make_Coord(k, t, r, s, c)) = + convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref)); + + } // for (C) + } // for (S) + } // for (R) + } // for (T) + } // for (K) +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Generic 3D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ElementCompute, + typename ElementAccumulator = ElementCompute, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Conv3d( + conv::Operator convolutional_operator, + conv::Conv3dProblemSize problem_size, + TensorRef tensor_A, + TensorRef tensor_B, + TensorRef tensor_C, + TensorRef tensor_D, + ElementCompute alpha, + ElementCompute beta) { + + switch (convolutional_operator) { + case conv::Operator::kFprop: + Conv3dFprop< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta); + break; + + case conv::Operator::kDgrad: + Conv3dDgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta); + break; + + case conv::Operator::kWgrad: + Conv3dWgrad< + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + ElementCompute, + ElementAccumulator, + ConvertOp, InnerProductOp + >(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta); + break; + + default: + break; + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/error_metrics.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/error_metrics.h new file mode 100644 index 
0000000000000000000000000000000000000000..0b4285c0173e3555bcaa8af2c3d51d797fb730ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/error_metrics.h @@ -0,0 +1,66 @@ + +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/util/reference/host/tensor_reduce.h" +#include "cutlass/core_io.h" + +namespace cutlass { +namespace reference { +namespace host { + +/// Helper to compute the relative error metric for tensor A_computed w.r.t. to tensor A_reference +template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorRelativeErrorMetric( + TensorView view_A_computed, + TensorView view_B_reference, + ComputeType identity = ComputeType() +) { + + return cutlass::reference::host::TensorNormDiff(view_A_computed, view_B_reference, identity) / + cutlass::reference::host::TensorNorm(view_B_reference, identity); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..85cf51c930b87c8de86d833159f9a1aa49275f09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm.h @@ -0,0 +1,531 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GEMM in host-side code. 
+*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/numeric_types.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/arch/mma.h" +#include "cutlass/util/host_tensor.h" + +namespace cutlass { +namespace reference { +namespace host { + +template +struct CastIfScalar { + static Out cast(In in) { + return Out(in); + } +}; + +template +struct CastIfScalar, In> { + typedef cutlass::complex Out; + static Out cast(In in) { + return Out(static_cast(in)); + } +}; + +template +struct CastIfScalar, cutlass::complex> { + typedef cutlass::complex Out; + typedef cutlass::complex In; + static Out cast(In in) { + return Out(in); + } +}; + +template +Out cast_if_scalar(In in) { + return CastIfScalar::cast(in); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_gemm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementB b = tensor_b.at(MatrixCoord(k_block, col)); + + ComputeType compute_a(cast_if_scalar(a)); + ComputeType compute_b(cast_if_scalar(b)); + + accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * ScalarType(tensor_c.at(coord))); + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_gemm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum) { + compute_gemm( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c, + initial_accum); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = cutlass::arch::OpMultiplyAdd +> +struct Gemm; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + 
+//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add-saturate +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm, + NumericConverterClamp>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 
2"); + + compute_gemm, + NumericConverterClamp>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for XOR-popc +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +/// Partial specialization for AND-popc +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 
2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Gemm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_gemm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Batched GEMM +// +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a batch of GEMMs over a set of matrices of common dimension. +// +// TensorRefCollection* is a type satisfying the TensorRefCollection concept. 
+// +template < + typename TensorRefCollectionA, + typename TensorRefCollectionB, + typename TensorRefCollectionC, + typename ScalarType, + typename AccumulatorType +> +void BatchedGemm( + gemm::GemmCoord problem_size, + int batch_count, + ScalarType alpha, + TensorRefCollectionA const& tensor_a, + TensorRefCollectionB const& tensor_b, + ScalarType beta, + TensorRefCollectionC &tensor_c, + AccumulatorType initial_accum) { + + typename TensorRefCollectionA::ConstIterator tensor_a_it = tensor_a.begin(); + typename TensorRefCollectionB::ConstIterator tensor_b_it = tensor_b.begin(); + typename TensorRefCollectionC::ConstIterator tensor_c_it = tensor_c.begin(); + + for (int batch = 0; + batch < batch_count; + ++batch, ++tensor_a_it, ++tensor_b_it, ++tensor_c_it) { + + Gemm + gemm; + + gemm(problem_size, alpha, *tensor_a_it, *tensor_b_it, beta, *tensor_c_it, + initial_accum); + } +} + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +// +// TensorRefCollection* is a type satisfying the TensorRefCollection concept. 
+// +template < + typename TensorRefCollectionA, + typename TensorRefCollectionB, + typename TensorRefCollectionC, + typename ScalarType, + typename AccumulatorType +> +void BatchedGemm( + gemm::GemmCoord problem_size, + int batch_count, + ScalarType alpha, + TensorRefCollectionA const& tensor_a, + TensorRefCollectionB const& tensor_b, + ScalarType beta, + TensorRefCollectionC &tensor_c) { + + BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, ScalarType(0)); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..a884023feec8f25ec93221a2816a4a83c0eb629a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h @@ -0,0 +1,210 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued GEMM in host-side code. +*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/matrix_coord.h" + +#include "cutlass/tensor_view.h" + +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. 
Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ElementD = ElementC, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void GemmComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + // Note: batch is ignored. + int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) { + + // Compute matrix product using blocks + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementB b = tensor_b.at(MatrixCoord(k_block, col)); + + ComputeType 
a_ik = ComputeType(a); + ComputeType b_kj = ComputeType(b); + + if (transform_a == ComplexTransform::kConjugate) { + a_ik = conj(a_ik); + } + + if (transform_b == ComplexTransform::kConjugate) { + b_kj = conj(b_kj); + } + + accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * ScalarType(tensor_c.at(coord))); + } + } + } + + } // for (col_block) + } // for (row_block) + + tensor_a.add_pointer_offset(batch_stride_A); + tensor_b.add_pointer_offset(batch_stride_B); + tensor_c.add_pointer_offset(batch_stride_C); + tensor_d.add_pointer_offset(batch_stride_D); + + } // for (batch_idx) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ElementD = ElementC +> +void GemmComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d) { + + GemmComplex(problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0)); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_planar_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_planar_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..7e942108d12ac4ba18b5f473a3f59cfc63ea8bd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_planar_complex.h @@ -0,0 +1,228 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued GEMM in host-side code. +*/ + +#pragma once + +#include "cutlass/coord.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_ref_planar_complex.h" + +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. 
+/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add> +> +void GemmPlanarComplex( + gemm::GemmCoord problem_size, + complex alpha, + TensorRefPlanarComplex tensor_a, + ComplexTransform transform_a, + TensorRefPlanarComplex tensor_b, + ComplexTransform transform_b, + complex beta, + TensorRefPlanarComplex tensor_c, + TensorRefPlanarComplex tensor_d, + complex initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + using ComplexA = typename TensorRefPlanarComplex::ComplexElement; + using ComplexB = typename TensorRefPlanarComplex::ComplexElement; + using ComplexC = typename TensorRefPlanarComplex::ComplexElement; + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + complex accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + + ComplexA a_ik = tensor_a.at(MatrixCoord(row, k_block)); + ComplexB b_kj = tensor_b.at(MatrixCoord(k_block, col)); + + complex a = complex{ + ComputeType(a_ik.real()), + ComputeType(a_ik.imag()) + }; + + complex b = complex{ + ComputeType(b_kj.real()), + ComputeType(b_kj.imag()) + }; + + if (transform_a == ComplexTransform::kConjugate) { + a = conj(a); + } + + if (transform_b == ComplexTransform::kConjugate) { + b = conj(b); + } + + accum[i][j] = inner_product_op(a, b, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + + complex acc{ + ScalarType(accum[i][j].real()), + ScalarType(accum[i][j].imag()) + }; + + ComplexC d_ij = tensor_c.at(coord); + + complex src{ + ScalarType(d_ij.real()), + ScalarType(d_ij.imag()) + }; + + complex result = alpha * acc + beta * src; + + d_ij.real() = convert_op(result.real()); + d_ij.imag() = convert_op(result.imag()); + + tensor_d.at(coord) = d_ij; + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a 
general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType +> +void GemmPlanarComplex( + gemm::GemmCoord problem_size, + complex alpha, + TensorRefPlanarComplex tensor_a, + ComplexTransform transform_a, + TensorRefPlanarComplex tensor_b, + ComplexTransform transform_b, + complex beta, + TensorRefPlanarComplex tensor_c, + TensorRefPlanarComplex tensor_d) { + + GemmPlanarComplex( + problem_size, + alpha, + tensor_a, transform_a, + tensor_b, transform_b, + beta, + tensor_c, + tensor_d, + complex()); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gett.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gett.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7b52dc5874d9135a7f05d389589f9840dd2e9171 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gett.hpp @@ -0,0 +1,459 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for GETT in host-side code. 
+*/ + +#pragma once + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/complex.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/epilogue/thread/activation.h" + +#include "cute/tensor.hpp" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::reference::host { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template< + class ElementAccumulator_, + class TensorA_, // (M, K, L) + class TensorB_ // (N, K, L) +> +struct GettMainloopParams { + using ElementAccumulator = ElementAccumulator_; + using TensorA = TensorA_; + using TensorB = TensorB_; + using EngineA = typename TensorA::engine_type; + using LayoutA = typename TensorA::layout_type; + using EngineB = typename TensorB::engine_type; + using LayoutB = typename TensorB::layout_type; + + TensorA A{}; + TensorB B{}; + + ComplexTransform transform_A = ComplexTransform::kNone; + ComplexTransform transform_B = ComplexTransform::kNone; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template< + class ElementScalar_, + class ElementScalingFactor_, + class ElementAccumulator_, + class ElementCompute_, + class TensorC_, // (M, N, L) + class TensorD_, // (M, N, L) + class VectorBias_ = TensorD_, // (M, 1) + class TensorAux_ = TensorD_, // (M, N, L) + class VectorAlpha_ = TensorD_, // (M, 1) + class VectorBeta_ = VectorAlpha_, // (M, 1) + class ActivationFunctor_ = cutlass::epilogue::thread::Identity, + class BiasBinaryOp_ = cutlass::plus +> +struct GettEpilogueParams { + using ElementScalar = ElementScalar_; + using ElementScalingFactor = ElementScalingFactor_; + using ElementAccumulator = ElementAccumulator_; + using ElementCompute = ElementCompute_; + using TensorC = TensorC_; + using TensorD = TensorD_; + using TensorAux = TensorAux_; + using VectorBias = 
VectorBias_; + using VectorAlpha = VectorAlpha_; + using VectorBeta = VectorBeta_; + using ActivationFunctor = ActivationFunctor_; + using BiasBinaryOp = BiasBinaryOp_; + + using EngineC = typename TensorC::engine_type; + using LayoutC = typename TensorC::layout_type; + using EngineD = typename TensorD::engine_type; + using LayoutD = typename TensorD::layout_type; + + ElementScalar alpha = ElementScalar(1); + ElementScalar beta = ElementScalar(0); + + TensorC C{}; + TensorD D{}; + VectorBias Bias{}; + TensorAux Aux{}; + VectorAlpha Valpha{}; + VectorBeta Vbeta{}; + + ElementAccumulator* abs_max_D = nullptr; + ElementAccumulator* abs_max_Aux = nullptr; + + ElementScalingFactor scale_a = ElementScalingFactor(1); + ElementScalingFactor scale_b = ElementScalingFactor(1); + ElementScalingFactor scale_c = ElementScalingFactor(1); + ElementScalingFactor scale_d = ElementScalingFactor(1); + ElementScalingFactor scale_aux = ElementScalingFactor(1); + + bool beta_per_channel_scaling = false; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// GETT - General Tensor-Tensor contraction reference kernel +template < + class MainloopParams, + class EpilogueParams +> +void Gett( + MainloopParams const& mainloop_params, + EpilogueParams const& epilogue_params) +{ + + static int constexpr kBlockM = 64; + static int constexpr kBlockN = 64; + +#if defined(_OPENMP) + #pragma omp parallel for collapse(3) +#endif + for (int64_t l = 0; l < cute::size<2>(mainloop_params.A.layout()); ++l) { + for (int64_t m = 0; m < cute::size<0>(mainloop_params.A.layout()); m += kBlockM) { + for (int64_t n = 0; n < cute::size<0>(mainloop_params.B.layout()); n += kBlockN) { + typename MainloopParams::ElementAccumulator acc[kBlockM][kBlockN]; + gett_mainloop(mainloop_params, m, n, l, acc); + gett_epilogue(epilogue_params, m, n, l, acc); + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// 
+ +/// GETT - Mainloop +template +void gett_mainloop( + MainloopParams const& mainloop_params, + int64_t m, + int64_t n, + int64_t l, + ElementAccumulator (&acc)[kBlockM][kBlockN]) +{ + + static_assert(cute::rank(typename MainloopParams::LayoutA{}) == 3, "M, K, B"); + static_assert(cute::rank(typename MainloopParams::LayoutB{}) == 3, "N, K, B"); + + using ElementA = typename MainloopParams::TensorA::value_type; + using ElementB = typename MainloopParams::TensorB::value_type; + + using RingOp = multiply_add; + RingOp fma_op; + + // Zero out accumulators + for (int m_b = 0; m_b < kBlockM; ++m_b) { + for (int n_b = 0; n_b < kBlockN; ++n_b) { + acc[m_b][n_b] = ElementAccumulator(0); // RingOp::AdditionIdentity + } + } + + // Compute on this k-block + for (int64_t k = 0; k < cute::size<1>(mainloop_params.A.layout()); ++k) { + // Load A + ElementAccumulator a_frag[kBlockM]; + for (int m_b = 0; m_b < kBlockM; ++m_b) { + if (m + m_b < cute::size<0>(mainloop_params.A.layout())) { + a_frag[m_b] = static_cast(mainloop_params.A(m + m_b, k, l)); + if (mainloop_params.transform_A == ComplexTransform::kConjugate) { + a_frag[m_b] = conj(a_frag[m_b]); + } + } else { + a_frag[m_b] = ElementAccumulator(0); // RingOp::AdditionIdentity + } + } + + // Load B + ElementAccumulator b_frag[kBlockN]; + for (int n_b = 0; n_b < kBlockN; ++n_b) { + if (n + n_b < cute::size<0>(mainloop_params.B.layout())) { + b_frag[n_b] = static_cast(mainloop_params.B(n + n_b, k, l)); + if (mainloop_params.transform_B == ComplexTransform::kConjugate) { + b_frag[n_b] = conj(b_frag[n_b]); + } + } else { + b_frag[n_b] = ElementAccumulator(0); // RingOp::AdditionIdentity + } + } + + // do compute + for (int m_b = 0; m_b < kBlockM; ++m_b) { + for (int n_b = 0; n_b < kBlockN; ++n_b) { + acc[m_b][n_b] = fma_op(a_frag[m_b], b_frag[n_b], acc[m_b][n_b]); + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// GETT - Epilogue +template +void gett_epilogue( 
+ EpilogueParams const& epilogue_params, + int64_t m, + int64_t n, + int64_t l, + ElementAccumulator (&acc)[kBlockM][kBlockN]) +{ + static_assert(cute::rank(typename EpilogueParams::LayoutC{}) == 3, "M, K, B"); + static_assert(cute::rank(typename EpilogueParams::LayoutD{}) == 3, "N, K, B"); + + using ElementCompute = typename EpilogueParams::ElementCompute; + using ElementC = typename EpilogueParams::TensorC::value_type; + using ElementD = typename EpilogueParams::TensorD::value_type; + using ElementAux = typename EpilogueParams::TensorAux::value_type; + using ElementBias = typename EpilogueParams::VectorBias::value_type; + using ElementScalar = typename EpilogueParams::ElementScalar; + using ElementScalingFactor = typename EpilogueParams::ElementScalingFactor; + using ActivationFunctor = typename EpilogueParams::ActivationFunctor; + using BiasBinaryOp = typename EpilogueParams::BiasBinaryOp; + + constexpr bool IsScalingAndAmaxOutputNeeded = + std::is_same_v or + std::is_same_v; + + constexpr bool IsScalingAndAmaxAuxOutputNeeded = + std::is_same_v or + std::is_same_v; + + // Input related converter + NumericConverter accumulator_converter; + NumericConverter source_converter; + NumericConverter bias_converter; + + // Scale related converter + NumericConverter scale_converter; + NumericConverter scaling_factor_converter; + + // Abs max converter + [[maybe_unused]] NumericConverter abs_max_output_converter; + + // Output related converter + NumericConverter destination_converter; + NumericConverter aux_destination_converter; + + // Epilogue operations + multiply_add epilogue_fma; + multiplies mul; + + // Activation operation + ActivationFunctor activation; + + // Bias binary operation + BiasBinaryOp bias_op; + + // Do conversion + ElementCompute converted_alpha = scale_converter(epilogue_params.alpha); + ElementCompute converted_beta = scale_converter(epilogue_params.beta); + ElementCompute converted_scale_a = scaling_factor_converter(epilogue_params.scale_a); + 
ElementCompute converted_scale_b = scaling_factor_converter(epilogue_params.scale_b); + ElementCompute converted_scale_c = scaling_factor_converter(epilogue_params.scale_c); + ElementCompute converted_scale_d = scaling_factor_converter(epilogue_params.scale_d); + ElementCompute converted_scale_aux = scaling_factor_converter(epilogue_params.scale_aux); + + // Init local var + [[maybe_unused]] ElementCompute local_abs_max_output = ElementCompute(0); + [[maybe_unused]] ElementCompute local_abs_max_aux_output = ElementCompute(0); + + converted_alpha = mul(converted_alpha, mul(converted_scale_a, converted_scale_b)); + converted_beta = mul(converted_beta, converted_scale_c); + + for (int n_b = 0; n_b < kBlockN; ++n_b) { + for (int m_b = 0; m_b < kBlockM; ++m_b) { + if (m + m_b < cute::size<0>(epilogue_params.D.layout()) && n + n_b < cute::size<1>(epilogue_params.D.layout())) { + // Convert every type to ElementCompute first, do compute, convert to output type, write it out + ElementCompute converted_acc = accumulator_converter(acc[m_b][n_b]); + // per-row alpha + if (epilogue_params.Valpha.data()) { + converted_alpha = scale_converter(epilogue_params.Valpha(m + m_b)); + } + ElementCompute output = mul(converted_alpha, converted_acc); + + if (epilogue_params.Bias.data()) { + ElementCompute converted_bias = bias_converter(epilogue_params.Bias(m + m_b)); + output = bias_op(output, converted_bias); + } + + if (epilogue_params.C.data()) { + ElementCompute converted_src = source_converter(epilogue_params.C(m + m_b, n + n_b, l)); + // per-row beta + if (epilogue_params.Vbeta.data()) { + converted_beta = scale_converter(epilogue_params.Vbeta(m + m_b)); + } + output = epilogue_fma(converted_beta, converted_src, output); + } + + if (epilogue_params.Aux.data()) { + auto aux_output = output; + if constexpr (IsScalingAndAmaxAuxOutputNeeded) { + maximum_absolute_value_reduction amax_op; + local_abs_max_aux_output = amax_op(local_abs_max_aux_output, aux_output); + aux_output = 
epilogue_fma(converted_scale_aux, aux_output, ElementCompute(0)); + } + + epilogue_params.Aux(m + m_b, n + n_b, l) = aux_destination_converter(aux_output); + } + + output = activation(output); + + if constexpr (IsScalingAndAmaxOutputNeeded) { + maximum_absolute_value_reduction amax_op; + local_abs_max_output = amax_op(local_abs_max_output, output); + output = epilogue_fma(converted_scale_d, output, ElementCompute(0)); + } + + epilogue_params.D(m + m_b, n + n_b, l) = destination_converter(output); + } + } + } +#if defined(_OPENMP) + #pragma omp critical(Abs_Max_Data_Update) +#endif + { + if constexpr (IsScalingAndAmaxOutputNeeded) { + if (epilogue_params.abs_max_D) { + *epilogue_params.abs_max_D = maximum_with_nan_propogation{}( + *epilogue_params.abs_max_D, abs_max_output_converter(local_abs_max_output)); + } + } + + if constexpr (IsScalingAndAmaxAuxOutputNeeded) { + if (epilogue_params.abs_max_Aux) { + *epilogue_params.abs_max_Aux = maximum_with_nan_propogation{}( + *epilogue_params.abs_max_Aux, abs_max_output_converter(local_abs_max_aux_output)); + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +auto make_layout_rank3(const TensorType& tensor) { + // append a batch mode of size 1 if we do not have tensors that are rank 3 + return make_layout( + make_shape(cute::get<0>(tensor.shape()), cute::get<1>(tensor.shape()), cute::Int<1>{}), + make_stride(cute::get<0>(tensor.stride()), cute::get<1>(tensor.stride()), int64_t(cosize(tensor.layout())))); +} + +/// GEMM - General Matrix-Matrix contraction without conjugation options +template < + class MainloopParams, + class EpilogueParams +> +void Gemm3x( + MainloopParams const& mainloop_params, + EpilogueParams const& epilogue_params) +{ + using namespace cute; + + static_assert(rank(typename MainloopParams::LayoutA{}) == rank(typename MainloopParams::LayoutB{})); + static_assert(rank(typename EpilogueParams::LayoutC{}) == rank(typename 
EpilogueParams::LayoutD{})); + static_assert(rank(typename MainloopParams::LayoutA{}) == rank(typename EpilogueParams::LayoutC{})); + + if constexpr (rank(typename MainloopParams::LayoutA{}) == 2) { + Layout layout_A = make_layout_rank3(mainloop_params.A); + Layout layout_B = make_layout_rank3(mainloop_params.B); + Layout layout_C = make_layout_rank3(epilogue_params.C); + Layout layout_D = make_layout_rank3(epilogue_params.D); + Layout layout_Aux = make_layout_rank3(epilogue_params.Aux); + Layout layout_Bias = make_layout_rank3(epilogue_params.Bias); + Layout layout_Valpha = make_layout_rank3(epilogue_params.Valpha); + Layout layout_Vbeta = make_layout_rank3(epilogue_params.Vbeta); + + auto TensorA = make_tensor(mainloop_params.A.data(), layout_A); + auto TensorB = make_tensor(mainloop_params.B.data(), layout_B); + auto TensorC = make_tensor(epilogue_params.C.data(), layout_C); + auto TensorD = make_tensor(epilogue_params.D.data(), layout_D); + auto TensorAux = make_tensor(epilogue_params.Aux.data(), layout_Aux); + auto VectorBias = make_tensor(epilogue_params.Bias.data(), layout_Bias); + auto VectorAlpha = make_tensor(epilogue_params.Valpha.data(), layout_Valpha); + auto VectorBeta = make_tensor(epilogue_params.Vbeta.data(), layout_Vbeta); + + // Reconstruct mainloop params + GettMainloopParams + mainloop_params_converted{TensorA, + TensorB, + mainloop_params.transform_A, + mainloop_params.transform_B}; + + // Reconstruct epilogue params + GettEpilogueParams + epilogue_params_converted{epilogue_params.alpha, + epilogue_params.beta, + TensorC, + TensorD, + VectorBias, + TensorAux, + VectorAlpha, + VectorBeta, + epilogue_params.abs_amax_D, + epilogue_params.abs_amax_Aux, + epilogue_params.scale_a, + epilogue_params.scale_b, + epilogue_params.scale_c, + epilogue_params.scale_d, + epilogue_params.scale_aux + }; + + Gett(mainloop_params_converted, epilogue_params_converted); + } + else { + // if we already have a batch mode, just pass it through + Gett(mainloop_params, 
epilogue_params); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // cutlass::reference::host + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h new file mode 100644 index 0000000000000000000000000000000000000000..5b342600ac3619a17e17869a874a74753c2ca397 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h @@ -0,0 +1,261 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for Rank 2k update in host-side code. + + + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/arch/mma.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + FillMode FillModeC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_rank2k( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + static_assert( + FillModeC == FillMode::kLower || + FillModeC == FillMode::kUpper, + "Fill Mode can either be Lower or Upper."); + + using CompareOp = typename platform::conditional<(FillModeC == FillMode::kLower), + std::greater_equal, + std::less_equal>::type; + + // Note: batch is ignored. + // Note: M is same as N for Rank 2k update + int const N = problem_size.n(); + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + CompareOp compare_op; + + for (int row_block = 0; row_block < N; row_block += Nblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Nblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Nblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Nblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < N && col < N && compare_op(row, col)) + { + + // A x B^T + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementB b_t = tensor_b.at(MatrixCoord(col, k_block)); + + ComputeType compute_a(cast_if_scalar(a)); + ComputeType compute_b_t(cast_if_scalar(b_t)); + + accum[i][j] = inner_product_op(compute_a, 
compute_b_t, accum[i][j]); + + // B x A^T + ElementB b = tensor_b.at(MatrixCoord(row, k_block)); + ElementA a_t = tensor_a.at(MatrixCoord(col, k_block)); + + ComputeType compute_b(cast_if_scalar(b)); + ComputeType compute_a_t(cast_if_scalar(a_t)); + + accum[i][j] = inner_product_op(compute_b, compute_a_t, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Nblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < N && col < N && + ( (FillModeC == FillMode::kLower && row >= col) || + (FillModeC == FillMode::kUpper && row <= col) ) + ) { + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * ScalarType(tensor_c.at(coord))); + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general Rank 2k update (tensors of rank=2) pointed to by TensorRef +/// objects. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + FillMode FillModeC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_rank2k( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum) { + compute_rank2k( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c, + initial_accum); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + FillMode FillModeC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = cutlass::arch::OpMultiplyAdd +> +struct Rank2K; + 
+//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Rank2K { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_rank2k>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_rank2k>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..519379cd599ed691deea5062702181c29329be82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k_complex.h @@ -0,0 +1,318 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued Rank 2K update in host-side code. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Rank2KComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + FillMode fill_mode_c, + BlasMode blas_mode, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + // Rank2K update operates on A=NxK, B=NxK, and C=NxN + assert(M==N); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) { + + // Compute matrix product using blocks + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N && + ( (fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col) ) + ) { + + // A x B^T (Symmetric) or A x B^H (Hermitian) + // complex conjugation on operandB (b_t) is function of blas3 computation + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementB b_t = (blas_mode == BlasMode::kHermitian) ? 
+ conj(tensor_b.at(MatrixCoord(col, k_block))) : + tensor_b.at(MatrixCoord(col, k_block)); + + ComputeType a_ik = ComputeType(a); + ComputeType b_jk = ComputeType(b_t); + + // complex conjugation is a function of operand layouts + if (transform_a == ComplexTransform::kConjugate) { + a_ik = conj(a_ik); + } + // complex conjugation is a function of operand layouts + if (transform_b == ComplexTransform::kConjugate) { + b_jk = conj(b_jk); + } + + accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]); + } + } + } + } + + /* HER2K need two epilogues to handle complex alpha value */ + if ( blas_mode == BlasMode::kHermitian ) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N && + ((fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col)) + ) { + + ScalarType c = tensor_c.at(coord); + // The imaginary parts of the diagonal elements of + // a complex data type are assumed and set to zero + if (blas_mode == BlasMode::kHermitian) { + c = (row == col) ? real(c) : c; + } + + tensor_d.at(coord) = convert_op(alpha * + ScalarType(accum[i][j]) + + beta * c); + } + } + } + + /* Zeoring out accum for second HERK */ + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N && + ( (fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col) ) + ) { + + // B x A^T (Symmetric) or B x A^H (Hermitian) + // complex conjugation on operandB (a_t) is function of blas3 computation + ElementB b = tensor_b.at(MatrixCoord(row, k_block)); + ElementA a_t = (blas_mode == BlasMode::kHermitian) ? 
+ conj(tensor_a.at(MatrixCoord(col, k_block))): + tensor_a.at(MatrixCoord(col, k_block)); + + ComputeType b_ik = ComputeType(b); + ComputeType a_jk = ComputeType(a_t); + + // complex conjugation here is a function of operand layouts + if (transform_b == ComplexTransform::kConjugate) { + b_ik = conj(b_ik); + } + // complex conjugation here is a function of operand layouts + if (transform_a == ComplexTransform::kConjugate) { + a_jk = conj(a_jk); + } + + accum[i][j] = inner_product_op(b_ik, a_jk, accum[i][j]); + } + } + } + } + + ScalarType alpha_hermitian = (blas_mode == BlasMode::kHermitian) ? + conj(alpha) : alpha; + ScalarType beta_hermitian = (blas_mode == BlasMode::kHermitian) ? + 1 : beta; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N && + ((fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col)) + ) { + + ScalarType d = (blas_mode == BlasMode::kHermitian) ? + tensor_d.at(coord) : tensor_c.at(coord); + + ScalarType tmp_d = convert_op( + alpha_hermitian * ScalarType(accum[i][j]) + + beta_hermitian * d); + + if (blas_mode == BlasMode::kHermitian && row == col ) { + tensor_d.at(coord) = real(tmp_d); + } else { + tensor_d.at(coord) = tmp_d; + } + } + } + } + + } // for (col_block) + } // for (row_block) + + tensor_a.add_pointer_offset(batch_stride_A); + tensor_b.add_pointer_offset(batch_stride_B); + tensor_c.add_pointer_offset(batch_stride_C); + tensor_d.add_pointer_offset(batch_stride_D); + + } // for (batch_idx) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. 
+template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType +> +void Rank2KComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + TensorRef tensor_b, + ComplexTransform transform_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + FillMode fill_mode_c, + BlasMode blas_mode) { + + Rank2KComplex( + problem_size, alpha, + tensor_a, transform_a, + tensor_b, transform_b, + beta, tensor_c, tensor_d, + ScalarType(0), + fill_mode_c, + blas_mode); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_k_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_k_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..d5f3f2e88cb3fd62a2dd0bcad5fbe1b1896bd5d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_k_complex.h @@ -0,0 +1,234 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued Rank 2K update in host-side code. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. 
+/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename ConvertOp = NumericConverter, + typename InnerProductOp = multiply_add +> +void Rank2KComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + FillMode fill_mode_c, + BlasMode blas_mode, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static_assert( + LayoutA::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + int const K = problem_size.k(); + + // Rank2K update operates on A=NxK, B=NxK, and C=NxN + assert(M==N); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + + for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) { + + // Compute matrix product using blocks + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N && + ( (fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col) ) + ) { + + // A x A^T (Symmetric) or A x A^H (Hermitian) + // complex conjugation on operandB (a_t) (function of blas3 computation) + ElementA a = tensor_a.at(MatrixCoord(row, k_block)); + ElementA a_t = (blas_mode == BlasMode::kHermitian) ? 
+ conj(tensor_a.at(MatrixCoord(col, k_block))) : + tensor_a.at(MatrixCoord(col, k_block)); + + ComputeType a_ik = ComputeType(a); + ComputeType b_jk = ComputeType(a_t); + + // complex conjugation (function of input layouts) + if (transform_a == ComplexTransform::kConjugate) { + a_ik = conj(a_ik); + } + // complex conjugation (function of input layouts) + if (transform_a == ComplexTransform::kConjugate) { + b_jk = conj(b_jk); + } + + accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]); + + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N && + ((fill_mode_c == FillMode::kLower && row >= col) || + (fill_mode_c == FillMode::kUpper && row <= col)) + ) { + + ScalarType c = tensor_c.at(coord); + // The imaginary parts of the diagonal elements of + // a complex data type are assumed and set to zero + if (blas_mode == BlasMode::kHermitian) { + c = (row == col) ? real(c) : c; + } + + ScalarType tmp_d = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * c); + + if (blas_mode == BlasMode::kHermitian && row == col ) { + tensor_d.at(coord) = real(tmp_d); + } else { + tensor_d.at(coord) = tmp_d; + } + } + } + } + + } // for (col_block) + } // for (row_block) + + tensor_a.add_pointer_offset(batch_stride_A); + tensor_c.add_pointer_offset(batch_stride_C); + tensor_d.add_pointer_offset(batch_stride_D); + + } // for (batch_idx) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// This assumes the accumulator type is the same type as the scalars. 
+template < + typename ElementA, + typename LayoutA, + typename ElementC, + typename LayoutC, + typename ScalarType +> +void RankKComplex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + ComplexTransform transform_a, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + FillMode fill_mode_c, + BlasMode blas_mode) { + + Rank2KComplex( + problem_size, alpha, + tensor_a, transform_a, + beta, tensor_c, tensor_d, + ScalarType(0), + fill_mode_c, + blas_mode); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm.h new file mode 100644 index 0000000000000000000000000000000000000000..736107aa95c6f8a46d92b73e78c50c08cfb2143c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm.h @@ -0,0 +1,285 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for SYMM update in host-side code. + + + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/arch/mma.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/gemm.h" + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. 
+template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_symm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + static_assert(SideModeA != SideMode::kInvalid + , "Side Mode can either be Left or Right."); + + static_assert( + FillModeA == FillMode::kLower || + FillModeA == FillMode::kUpper, + "Fill Mode can either be Lower or Upper."); + + using CompareOp_w_diag = typename TrMatrixCompareOp::Type; + using CompareOp_wo_diag = typename TrMatrixCompareOp::Type; + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + // Assuming correct k-dimension value is passed + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + CompareOp_w_diag compare_op_1; + CompareOp_wo_diag compare_op_2; + + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + ElementA a_1 = ElementA(); + ElementB b_1 = ElementB(); + ElementA a_2 = ElementA(); + ElementB b_2 = ElementB(); + + // A x B or B x A (with diagonal) + if (SideModeA == SideMode::kLeft) { + a_1 = (compare_op_1(row, k_block)) ? + (tensor_a.at(MatrixCoord(row, k_block))) : ElementA(); + b_1 = tensor_b.at(MatrixCoord(k_block, col)); + } else if (SideModeA == SideMode::kRight) { + a_1 = tensor_b.at(MatrixCoord(row, k_block)); + b_1 = (compare_op_1(k_block, col)) ? + tensor_a.at(MatrixCoord(k_block, col)) : ElementA(); + } + + ComputeType compute_a_1(cast_if_scalar(a_1)); + ComputeType compute_b_1(cast_if_scalar(b_1)); + + accum[i][j] = inner_product_op(compute_a_1, compute_b_1, accum[i][j]); + + // A^T x B or B x A^T (without diagonal) + if (SideModeA == SideMode::kLeft) { + a_2 = (compare_op_2(k_block, row)) ? + (tensor_a.at(MatrixCoord(k_block, row))) : ElementA(); + b_2 = tensor_b.at(MatrixCoord(k_block, col)); + } else if (SideModeA == SideMode::kRight) { + a_2 = tensor_b.at(MatrixCoord(row, k_block)); + b_2 = (compare_op_2(col, k_block)) ? 
+ tensor_a.at(MatrixCoord(col, k_block)) : ElementA(); + } + + ComputeType compute_a_2(cast_if_scalar(a_2)); + ComputeType compute_b_2(cast_if_scalar(b_2)); + + accum[i][j] = inner_product_op(compute_a_2, compute_b_2, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * ScalarType(tensor_c.at(coord))); + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general Symm update (tensors of rank=2) pointed to by TensorRef +/// objects. +template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_symm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum) { + compute_symm( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c, + initial_accum); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = cutlass::arch::OpMultiplyAdd +> +struct Symm; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Symm { + + void 
operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_symm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); + } + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_symm>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..aa4689101314f08dd410f1e977c4ad003125f149 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm_complex.h @@ -0,0 +1,319 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued SYMM update in host-side code. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include + +namespace cutlass { +namespace reference { +namespace host { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef +/// objects. +/// +/// Explicitly naming types needed by this template can be cumbersome, particularly for the +/// accumulator type, so a function argument 'initial_accum' is exposed. Passing +/// AccumulatorType(0) as the last function argument can be easier than naming all template +/// arguments explicitly. +template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + BlasMode BlasMode_ = BlasMode::kSymmetric, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_symm_complex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum, + int batch_count = 1, + int64_t batch_stride_A = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_D = 0) { + + static SideMode const kSideModeA = SideModeA; + static FillMode const kFillModeA = FillModeA; + static BlasMode const kBlasMode = BlasMode_; + + static_assert( + LayoutA::kRank == 2 && + LayoutB::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + static_assert(kSideModeA != SideMode::kInvalid + , "Side Mode can either be Left or Right."); + + static_assert( + kFillModeA == FillMode::kLower || + kFillModeA == FillMode::kUpper, + "Fill Mode can either be Lower or Upper."); 
+ + using CompareOp_w_diag = typename TrMatrixCompareOp::Type; + using CompareOp_wo_diag = typename TrMatrixCompareOp::Type; + + // Note: batch is ignored. + int const M = problem_size.m(); + int const N = problem_size.n(); + // Assuming correct k-dimension value is passed + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + CompareOp_w_diag compare_op_1; + CompareOp_wo_diag compare_op_2; + + for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) { + + // Compute matrix product using blocks + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) + { + ElementA a_1 = ElementA(); + ElementB b_1 = ElementB(); + ElementA a_2 = ElementA(); + ElementB b_2 = ElementB(); + + // A x B or B x A (with diagonal) + if (kSideModeA == SideMode::kLeft) { + a_1 = (compare_op_1(row, k_block)) ? + (tensor_a.at(MatrixCoord(row, k_block))) : ElementA(); + b_1 = tensor_b.at(MatrixCoord(k_block, col)); + } else if (kSideModeA == SideMode::kRight) { + a_1 = tensor_b.at(MatrixCoord(row, k_block)); + b_1 = (compare_op_1(k_block, col)) ? 
+ tensor_a.at(MatrixCoord(k_block, col)) : ElementA(); + } + ComputeType compute_a_1 = ComputeType(a_1); + ComputeType compute_b_1 = ComputeType(b_1); + + // The imaginary parts of the diagonal elements of + // a complex data type are assumed and set to zero + if (kBlasMode == BlasMode::kHermitian && kSideModeA == SideMode::kLeft && row == k_block) { + compute_a_1 = real(compute_a_1); + } else if (kBlasMode == BlasMode::kHermitian && kSideModeA == SideMode::kRight && k_block == col) { + compute_b_1 = real(compute_b_1); + } + + accum[i][j] = inner_product_op(compute_a_1, compute_b_1, accum[i][j]); + + // A^T x B or B x A^T (without diagonal) + if (kSideModeA == SideMode::kLeft) { + a_2 = (compare_op_2(k_block, row)) ? + (tensor_a.at(MatrixCoord(k_block, row))) : ElementA(); + b_2 = tensor_b.at(MatrixCoord(k_block, col)); + if (kBlasMode == BlasMode::kHermitian) + a_2 = conj(a_2); + } else if (kSideModeA == SideMode::kRight) { + a_2 = tensor_b.at(MatrixCoord(row, k_block)); + b_2 = (compare_op_2(col, k_block)) ? 
+ tensor_a.at(MatrixCoord(col, k_block)) : ElementA(); + if (kBlasMode == BlasMode::kHermitian) + b_2 = conj(b_2); + } + + ComputeType compute_a_2 = ComputeType(a_2); + ComputeType compute_b_2 = ComputeType(b_2); + + accum[i][j] = inner_product_op(compute_a_2, compute_b_2, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + + ScalarType c = tensor_c.at(coord); + + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j]) + + beta * c); + } + } + } + + } // for (col_block) + } // for (row_block) + + tensor_a.add_pointer_offset(batch_stride_A); + tensor_b.add_pointer_offset(batch_stride_B); + tensor_c.add_pointer_offset(batch_stride_C); + tensor_d.add_pointer_offset(batch_stride_D); + + } // for (batch_idx) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + BlasMode BlasMode_ = cutlass::BlasMode::kSymmetric, + typename InnerProductOp = cutlass::arch::OpMultiplyAddComplex +> +struct SymmComplex; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct SymmComplex { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_symm_complex>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); 
+ } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for gaussian multiply-add +template +struct SymmComplex { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, ScalarType beta, + TensorRef tensor_c, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_symm_complex>( + problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.h new file mode 100644 index 0000000000000000000000000000000000000000..20187aba04a37b46eecbf52b1cc8241008aba94e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.h @@ -0,0 +1,423 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines host-side elementwise operations on TensorView. 
+*/ + +#pragma once + +// Standard Library includes +#include + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/relatively_equal.h" +#include "cutlass/tensor_view.h" +#include "cutlass/tensor_view_planar_complex.h" + +#include "cutlass/util/distribution.h" +#include "tensor_foreach.h" + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorEqualsFunc { + + // + // Data members + // + + TensorView lhs; + TensorView rhs; + bool result; + + /// Ctor + TensorEqualsFunc(): result(true) { } + + /// Ctor + TensorEqualsFunc( + TensorView const &lhs_, + TensorView const &rhs_ + ) : + lhs(lhs_), rhs(rhs_), result(true) { } + + /// Visits a coordinate + void operator()(Coord const &coord) { + + Element lhs_ = lhs.at(coord); + Element rhs_ = rhs.at(coord); + + if (lhs_ != rhs_) { + result = false; + } + } + + /// Returns true if equal + operator bool() const { + return result; + } +}; + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorRelativelyEqualsFunc { + + // + // Data members + // + + TensorView lhs; + TensorView rhs; + Element epsilon; + Element nonzero_floor; + bool result; + + /// Ctor + TensorRelativelyEqualsFunc( + TensorView const &lhs_, + TensorView const &rhs_, + Element epsilon_, + Element nonzero_floor_ + ) : + lhs(lhs_), + rhs(rhs_), + epsilon(epsilon_), + nonzero_floor(nonzero_floor_), + result(true) { } + + /// Visits a coordinate + void operator()(Coord const &coord) { + + Element lhs_ = lhs.at(coord); + Element rhs_ = rhs.at(coord); + + if (!relatively_equal(lhs_, rhs_, epsilon, nonzero_floor)) { + result = false; + } + } + + /// Returns true if 
equal + operator bool() const { + return result; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if two tensor views are equal. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorEquals( + TensorView const &lhs, + TensorView const &rhs) { + + // Extents must be identical + if (lhs.extent() != rhs.extent()) { + return false; + } + + detail::TensorEqualsFunc func(lhs, rhs); + TensorForEach( + lhs.extent(), + func + ); + + return bool(func); +} + +/// Returns true if two tensor views are equal. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorEquals( + TensorViewPlanarComplex const &lhs, + TensorViewPlanarComplex const &rhs) { + + // Extents must be identical + if (lhs.extent() != rhs.extent()) { + return false; + } + + detail::TensorEqualsFunc real_func( + {lhs.data(), lhs.layout(), lhs.extent()}, + {rhs.data(), rhs.layout(), rhs.extent()} + ); + + TensorForEach( + lhs.extent(), + real_func + ); + + if (!bool(real_func)) { + return false; + } + + detail::TensorEqualsFunc imag_func( + {lhs.data() + lhs.imaginary_stride(), lhs.layout(), lhs.extent()}, + {rhs.data() + rhs.imaginary_stride(), rhs.layout(), rhs.extent()} + ); + + TensorForEach( + lhs.extent(), + imag_func + ); + + return bool(imag_func); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if two tensor views are relatively equal. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorRelativelyEquals( + TensorView const &lhs, + TensorView const &rhs, + Element epsilon, + Element nonzero_floor) { + + // Extents must be identical + if (lhs.extent() != rhs.extent()) { + return false; + } + + detail::TensorRelativelyEqualsFunc func(lhs, rhs, epsilon, nonzero_floor); + TensorForEach( + lhs.extent(), + func + ); + + return bool(func); +} + +/// Returns true if two tensor views are relatively equal. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorRelativelyEquals( + TensorViewPlanarComplex const &lhs, + TensorViewPlanarComplex const &rhs, + Element epsilon, + Element nonzero_floor) { + + // Extents must be identical + if (lhs.extent() != rhs.extent()) { + return false; + } + + detail::TensorRelativelyEqualsFunc real_func( + {lhs.data(), lhs.layout(), lhs.extent()}, + {rhs.data(), rhs.layout(), rhs.extent()}, + epsilon, + nonzero_floor + ); + + TensorForEach( + lhs.extent(), + real_func + ); + + if (!bool(real_func)) { + return false; + } + + detail::TensorEqualsFunc imag_func( + {lhs.data() + lhs.imaginary_stride(), lhs.layout(), lhs.extent()}, + {rhs.data() + rhs.imaginary_stride(), rhs.layout(), rhs.extent()}, + epsilon, + nonzero_floor + ); + + TensorForEach( + lhs.extent(), + imag_func + ); + + return bool(imag_func); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if two tensor views are NOT equal. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorNotEquals( + TensorView const &lhs, + TensorView const &rhs) { + + // Extents must be identical + if (lhs.extent() != rhs.extent()) { + return true; + } + + detail::TensorEqualsFunc func(lhs, rhs); + TensorForEach( + lhs.extent(), + func + ); + + return !bool(func); +} + +/// Returns true if two tensor views are equal. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorNotEquals( + TensorViewPlanarComplex const &lhs, + TensorViewPlanarComplex const &rhs) { + + return !TensorEquals(lhs, rhs); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorContainsFunc { + + // + // Data members + // + + TensorView view; + Element value; + bool contains; + Coord location; + + // + // Methods + // + + /// Ctor + TensorContainsFunc(): contains(false) { } + + /// Ctor + TensorContainsFunc( + TensorView const &view_, + Element value_ + ) : + view(view_), value(value_), contains(false) { } + + /// Visits a coordinate + void operator()(Coord const &coord) { + + if (view.at(coord) == value) { + if (!contains) { + location = coord; + } + contains = true; + } + } + + /// Returns true if equal + operator bool() const { + return contains; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if a value is present in a tensor +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +bool TensorContains( + TensorView const & view, + Element value) { + + detail::TensorContainsFunc func( + view, + value + ); + + TensorForEach( + 
view.extent(), + func + ); + + return bool(func); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns a pair containing a boolean of whether a value exists in a tensor and the location of +/// of the first occurrence. If the value is not contained in the tensor, the second element of the +/// pair is undefined. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +std::pair > TensorFind( + TensorView const & view, + Element value) { + + detail::TensorContainsFunc func( + view, + value + ); + + TensorForEach( + view.extent(), + func + ); + + return std::make_pair(bool(func), func.location); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a4a5b4e38672016e7fcf010abc742ee74409a774 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.hpp @@ -0,0 +1,101 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Provides several functions for filling tensors with data. 
+*/ + +#pragma once + +// Standard Library includes +#include +#include +#include + +// Cute includes +#include "cute/tensor.hpp" + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/quaternion.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Returns true if two tensor views are equal. +template < + typename TensorL, + typename TensorR +> +bool TensorEquals( + TensorL lhs, + TensorR rhs) { + + // Extents must be identical + if (cute::size(lhs) != cute::size(rhs)) { + return false; + } + + for (int64_t idx = 0; idx < cute::size(lhs); ++idx) { + if (lhs(idx) != rhs(idx)) { + return false; + } + } + + return true; +} + +/// Returns true if two tensor views are NOT equal. 
+template < + typename TensorL, + typename TensorR +> +bool TensorNotEquals( + TensorL lhs, + TensorR rhs) { + + return TensorEquals(lhs, rhs); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_copy.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..053511c11665069b8e27b983f99a0e7c0c75e430 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_copy.h @@ -0,0 +1,256 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines host-side elementwise operations on TensorView. +*/ + +#pragma once + +// Standard Library includes +#include + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "tensor_foreach.h" + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Helper to convert between types +template < + typename DstElement, + typename SrcElement +> +struct TrivialConvert { + + TrivialConvert() { } + + DstElement operator()(SrcElement src) const { + return DstElement(src); + } +}; + +/// Helper to conditionally copy between tensor views. 
+template < + typename DstElement, + typename DstLayout, + typename SrcElement, + typename SrcLayout, + typename F +> +struct TensorCopyIf { + + using DstTensorView = TensorView; + using SrcTensorView = TensorView; + + // + // Data members + // + + DstTensorView dst; + SrcTensorView src; + F convert; + + // + // Methods + // + + TensorCopyIf() { } + + TensorCopyIf( + DstTensorView const &dst_, + SrcTensorView const &src_, + F const &convert_): dst(dst_), src(src_), convert(convert_) {} + + /// Copies based on destination and source bounds + void operator()(Coord const &coord) { + if (dst.contains(coord) && src.contains(coord)) { + dst.at(coord) = convert(src.at(coord)); + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies elements from one tensor view into another, satisfying bounds of each tensor. +template < + typename DstElement, /// Destination tensor's element type + typename DstLayout, /// Destination tensor's layout + typename SrcElement, /// Source tensor's element type + typename SrcLayout, /// Source tensor's layout + typename F /// Transformation functor +> +void TensorCopy( + TensorView dst, + TensorView src, + F const &transform) { + + using CopyIf = detail::TensorCopyIf< + DstElement, + DstLayout, + SrcElement, + SrcLayout, + F>; + + CopyIf copy_if(dst, src, transform); + + TensorForEach(dst.extent(), copy_if); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies elements from a TensorRef into a TensorView. Assumes source tensor has sufficient extent +/// to avoid out of bounds accesses. 
+template < + typename DstElement, /// Destination tensor's element type + typename DstLayout, /// Destination tensor's layout + typename SrcElement, /// Source tensor's element type + typename SrcLayout, /// Source tensor's layout + typename F /// Transformation functor +> +void TensorCopy( + TensorView dst, + TensorRef src, + F const &transform) { + + using CopyIf = detail::TensorCopyIf< + DstElement, + DstLayout, + SrcElement, + SrcLayout, + F>; + + TensorView src_view(src, dst.extent()); + + CopyIf copy_if(dst, src_view, transform); + + TensorForEach(dst.extent(), copy_if); +} + +/// Copies elements from a TensorRef into a TensorView. Assumes source tensor has sufficient extent +/// to avoid out of bounds accesses. +template < + typename DstElement, /// Destination tensor's element type + typename DstLayout, /// Destination tensor's layout + typename SrcElement, /// Source tensor's element type + typename SrcLayout, /// Source tensor's layout + typename F /// Transformation functor +> +void TensorCopy( + TensorRef dst, + TensorView src, + F const &transform) { + + using CopyIf = detail::TensorCopyIf< + DstElement, + DstLayout, + SrcElement, + SrcLayout, + F>; + + TensorView dst_view(dst, src.extent()); + + CopyIf copy_if(dst_view, src, transform); + + TensorForEach(src.extent(), copy_if); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies elements from one tensor view into another, satisfying bounds of each tensor. Succeeds +/// if SrcElement can be converted to DstElement. 
+template < + typename DstElement, /// Destination tensor's element type + typename DstLayout, /// Destination tensor's layout + typename SrcElement, /// Source tensor's element type + typename SrcLayout /// Source tensor's layout +> +void TensorCopy( + TensorView dst, + TensorView src) { + + detail::TrivialConvert convert; + + TensorCopy(dst, src, convert); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies elements from one tensor view into another, satisfying bounds of each tensor. Succeeds +/// if SrcElement can be converted to DstElement. +template < + typename DstElement, /// Destination tensor's element type + typename DstLayout, /// Destination tensor's layout + typename SrcElement, /// Source tensor's element type + typename SrcLayout, /// Source tensor's layout + typename F /// Transformation functor +> +void TensorCopy( + TensorView dst, + TensorRef src) { + + detail::TrivialConvert convert; + + TensorCopy(dst, src, convert); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies elements from one tensor view into another, satisfying bounds of each tensor. Succeeds +/// if SrcElement can be converted to DstElement. 
+template < + typename DstElement, /// Destination tensor's element type + typename DstLayout, /// Destination tensor's layout + typename SrcElement, /// Source tensor's element type + typename SrcLayout /// Source tensor's layout +> +void TensorCopy( + TensorRef dst, + TensorView src) { + + detail::TrivialConvert convert; + + TensorCopy(dst, src, convert); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_elementwise.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_elementwise.h new file mode 100644 index 0000000000000000000000000000000000000000..72f5f24daa830a0baeb0986daaec0ea456002da3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_elementwise.h @@ -0,0 +1,341 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Defines host-side elementwise operations on TensorView. 
+*/ + +#pragma once + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" + +#include "tensor_foreach.h" + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper to apply a binary operator in place +template < + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB, + typename ElementD, + typename LayoutD, + typename BinaryFunc> +struct TensorFuncBinaryOp { + + // + // Data members + // + + /// View of left-hand-side tensor + TensorView view_d; + TensorRef view_a; + TensorRef view_b; + BinaryFunc func; + + // + // Methods + // + + /// Constructor + TensorFuncBinaryOp() { } + + /// Constructor + TensorFuncBinaryOp( + TensorView const & view_d_, + TensorRef const & view_a_, + TensorRef const & view_b_, + BinaryFunc func = BinaryFunc() + ): + view_d(view_d_), view_a(view_a_), view_b(view_b_), func(func) { } + + /// Equality check + void operator()(Coord const &coord) const { + view_d.at(coord) = func( + ElementD(view_a.at(coord)), + ElementD(view_b.at(coord)) + ); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Adds two tensors and stores in the destination tensor: d = a + b +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB +> +void TensorAdd( + TensorView d, ///< destination tensor view + TensorRef a, ///< A tensor reference + TensorRef b ///< B tensor reference +) { + + 
detail::TensorFuncBinaryOp< + ElementD, + LayoutD, + ElementA, + LayoutA, + ElementB, + LayoutB, + cutlass::plus + > func(d, a, b); + + TensorForEach( + d.extent(), + func); +} + +/// Adds a tensor in place: d = d .+ a +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA +> +void TensorAdd( + TensorView d, ///< destination tensor view + TensorRef a ///< A tensor reference +) { + TensorAdd(d, d, a); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Subtracts two tensors and stores in the destination tensor: d = a - b +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB +> +void TensorSub( + TensorView d, ///< destination tensor view + TensorRef a, ///< A tensor reference + TensorRef b ///< B tensor reference + ) { + + detail::TensorFuncBinaryOp< + ElementD, + LayoutD, + ElementA, + LayoutA, + ElementB, + LayoutB, + cutlass::minus + > func(d, a, b); + + TensorForEach( + d.extent(), + func); +} + +/// Subtracts two tensors in place: d = d .- a +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB +> +void TensorSub( + TensorView d, ///< destination tensor view + TensorRef a ///< A tensor reference + ) { + + TensorSub(d, d, a); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Multiplies two tensors and stores in the destination tensor: d = a .* b +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB +> +void TensorMul( + TensorView d, ///< destination tensor view + TensorRef a, ///< A tensor reference + TensorRef b ///< B tensor reference +) { + + detail::TensorFuncBinaryOp< + ElementD, + LayoutD, + ElementA, + LayoutA, + ElementB, + LayoutB, + cutlass::multiplies + > 
func(d, a, b); + + TensorForEach( + d.extent(), + func); +} + +/// Multiplies tensors in place: d = d .* a +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA +> +void TensorMul( + TensorView d, ///< destination tensor view + TensorRef a ///< A tensor reference +) { + TensorMul(d, d, a); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Divides two tensors and stores in the destination tensor: d = a ./ b +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB +> +void TensorDiv( + TensorView d, ///< destination tensor view + TensorRef a, ///< A tensor reference + TensorRef b ///< B tensor reference +) { + + detail::TensorFuncBinaryOp< + ElementD, + LayoutD, + ElementA, + LayoutA, + ElementB, + LayoutB, + cutlass::divides + > func(d, a, b); + + TensorForEach( + d.extent(), + func); +} + +/// Divides tensors in place: d = d ./ a +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA +> +void TensorDiv( + TensorView d, ///< destination tensor view + TensorRef a ///< A tensor reference +) { + TensorDiv(d, d, a); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Divides two tensors and stores in the destination tensor: d = a ./ b +template < + typename ElementD, + typename LayoutD, + typename ElementA, + typename LayoutA, + typename ElementB, + typename LayoutB +> +void TensorModulus( + TensorView d, ///< destination tensor view + TensorRef a, ///< A tensor reference + TensorRef b ///< B tensor reference +) { + + detail::TensorFuncBinaryOp< + ElementD, + LayoutD, + ElementA, + LayoutA, + ElementB, + LayoutB, + cutlass::divides + > func(d, a, b); + + TensorForEach( + d.extent(), + func); +} + +/// Divides tensors in place: d = d ./ a +template < + typename ElementD, + typename LayoutD, + 
typename ElementA, + typename LayoutA +> +void TensorModulus( + TensorView d, ///< destination tensor view + TensorRef a ///< A tensor reference +) { + TensorDiv(d, d, a); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h new file mode 100644 index 0000000000000000000000000000000000000000..9b0dcdb374fd92212fb0f3ea00120f2aafc08952 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h @@ -0,0 +1,1549 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Provides several functions for filling tensors with data. +*/ + +#pragma once + +// Standard Library includes +#include +#include +#include +#include + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/quaternion.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/subbyte_reference.h" +#include "cutlass/tensor_view.h" +#include "cutlass/tensor_view_planar_complex.h" +#include "cutlass/blas3.h" + +#include "cutlass/util/distribution.h" +#include "tensor_foreach.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillFunc { + + using 
TensorView = TensorView; + + // + // Data members + // + + TensorView view; + Element value; + + // + // Methods + // + + TensorFillFunc( + TensorView const &view_ = TensorView(), + Element value_ = Element(0) + ): view(view_), value(value_) { } + + void operator()(Coord const & coord) const { + view.at(coord) = value; + } +}; + +/// Returns a pair of values of the Gaussian distribution generated by the Box Muller method +struct BoxMullerFunc { + + BoxMullerFunc() {} + + void operator()( + double* rnd, ///< Size-2 vector to be filled with random values + double mean = 0, ///< Mean of the Gaussian distribution + double stddev = 1, ///< Standard deviation of the Gaussian distribution + double pi = std::acos(-1)) const { + + double u1 = double(std::rand()) / double(RAND_MAX); + double u2 = double(std::rand()) / double(RAND_MAX); + rnd[0] = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2); + rnd[1] = std::sqrt(-2 * std::log(u1)) * std::sin(2 * pi * u2); + rnd[0] = mean + stddev * rnd[0]; + rnd[1] = mean + stddev * rnd[1]; + } +}; +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with a uniform value +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFill( + TensorView dst, ///< destination tensor + Element val = Element(0)) { ///< value to uniformly fill it with + + detail::TensorFillFunc func(dst, val); + + TensorForEach( + dst.extent(), + func + ); +} + +/// Fills a tensor with a uniform value +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFill( + TensorViewPlanarComplex dst, ///< destination tensor + cutlass::complex val = cutlass::complex(0)) { ///< value to uniformly fill it with + + TensorFill(dst.view_real(), val.real()); + TensorFill(dst.view_imag(), val.imag()); +} + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct RandomGaussianFunc { + + uint64_t seed; + double mean; + double stddev; + int int_scale; + double pi; + double pnz; + + // + // Methods + // + RandomGaussianFunc( + uint64_t seed_ = 0, + double mean_ = 0, + double stddev_ = 1, + int int_scale_ = -1, + double pnz_ = 100.0 + ): + seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) { + std::srand((unsigned)seed); + } + + /// Compute random value and update RNG state + Element operator()() const { + + // Box-Muller transform to generate random numbers with Normal distribution + double u1 = double(std::rand()) / double(RAND_MAX); + double u2 = double(std::rand()) / double(RAND_MAX); + + // Compute Gaussian random value + double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2); + rnd = mean + stddev * rnd; + + // Scale and convert final result + Element result; + + // Sample from the Bernoulli distribution, and use the result to sample from the Gaussian + std::random_device rnd_device; + std::mt19937 bernoulli_rnd(rnd_device()); + std::bernoulli_distribution bernoulli_dist(pnz / 100); + bool bernoulli_result = bernoulli_dist(bernoulli_rnd); + + // Sample from the Gaussian distribution for a nonzero element + if (bernoulli_result) { + if (int_scale >= 0) { + rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale); + result = static_cast(rnd); + } + else { + result = static_cast(rnd); + } + } + else { + result = static_cast(0); + } + + return result; + } +}; + +/// Partial specialization for initializing a complex value. 
+template +struct RandomGaussianFunc > { + + uint64_t seed; + double mean; + double stddev; + int int_scale; + double pi; + double pnz; + + // + // Methods + // + RandomGaussianFunc( + uint64_t seed_ = 0, + double mean_ = 0, + double stddev_ = 1, + int int_scale_ = -1, + double pnz_ = 100.0 + ): + seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) { + std::srand((unsigned)seed); + } + + /// Compute random value and update RNG state + complex operator()() const { + + Element reals[2]; + + double rnd[2]; + detail::BoxMullerFunc func; + func(rnd, mean, stddev, pi); + + // Sample from the Bernoulli distribution, and use the result to sample from the Gaussian + std::random_device rnd_device; + std::mt19937 bernoulli_rnd(rnd_device()); + std::bernoulli_distribution bernoulli_dist(pnz / 100); + bool bernoulli_result = bernoulli_dist(bernoulli_rnd); + + // Sample from the Gaussian distribution for a nonzero element + if (bernoulli_result) { + if (int_scale >= 0) { + rnd[0] = double(int(rnd[0] * double(1 << int_scale))); + rnd[1] = double(int(rnd[1] * double(1 << int_scale))); + reals[0] = from_real(rnd[0] / double(1 << int_scale)); + reals[1] = from_real(rnd[1] / double(1 << int_scale)); + } + else { + reals[0] = from_real(rnd[0]); + reals[1] = from_real(rnd[1]); + } + } + else { + reals[0] = from_real(0); + reals[1] = from_real(0); + } + + return complex(reals[0], reals[1]); + } +}; + +/// Partial specialization for initializing a complex value. 
+template +struct RandomGaussianFunc > { + + uint64_t seed; + double mean; + double stddev; + int int_scale; + double pi; + double pnz; + + // + // Methods + // + RandomGaussianFunc( + uint64_t seed_ = 0, + double mean_ = 0, + double stddev_ = 1, + int int_scale_ = -1, + double pnz_ = 100.0 + ): + seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) { + std::srand((unsigned)seed); + } + + /// Compute random value and update RNG state + Quaternion operator()() const { + + Element reals[4]; + + double rnd1[2]; + double rnd2[2]; + detail::BoxMullerFunc func; + func(rnd1, mean, stddev, pi); + func(rnd2, mean, stddev, pi); + + // Sample from the Bernoulli distribution, and use the result to sample from the Gaussian + std::random_device rnd_device; + std::mt19937 bernoulli_rnd(rnd_device()); + std::bernoulli_distribution bernoulli_dist(pnz / 100); + bool bernoulli_result = bernoulli_dist(bernoulli_rnd); + + // Sample from the Gaussian distribution for a nonzero element + if (bernoulli_result) { + if (int_scale >= 0) { + rnd1[0] = double(int(rnd1[0] * double(1 << int_scale))); + rnd1[1] = double(int(rnd1[1] * double(1 << int_scale))); + rnd2[0] = double(int(rnd2[0] * double(1 << int_scale))); + rnd2[1] = double(int(rnd2[1] * double(1 << int_scale))); + + reals[0] = from_real(rnd1[0] / double(1 << int_scale)); + reals[1] = from_real(rnd1[1] / double(1 << int_scale)); + reals[2] = from_real(rnd2[0] / double(1 << int_scale)); + reals[3] = from_real(rnd2[1] / double(1 << int_scale)); + } + else { + reals[0] = from_real(rnd1[0]); + reals[1] = from_real(rnd1[1]); + reals[2] = from_real(rnd2[0]); + reals[3] = from_real(rnd2[1]); + } + } + else { + reals[0] = from_real(0); + reals[1] = from_real(0); + reals[2] = from_real(0); + reals[3] = from_real(0); + } + + return Quaternion(reals[0], reals[1], reals[2], reals[3]); + } +}; + +/// Computes a random Gaussian distribution +template < + typename Element, ///< Element type + typename 
Layout> ///< Layout function +struct TensorFillGaussianFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + RandomGaussianFunc func; + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + TensorFillGaussianFunc( + TensorView view_ = TensorView(), + RandomGaussianFunc func_ = RandomGaussianFunc() + ): + view(view_), func(func_) { + + } + + /// Compute random value and update RNG state + void operator()(Coord const &coord) const { + view.at(coord) = func(); + } +}; + +/// Computes a random Gaussian distribution for a rank-2 tensor +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillSymmetricGaussianFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + RandomGaussianFunc func; + cutlass::FillMode fill_mode; + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + TensorFillSymmetricGaussianFunc( + TensorView view_ = TensorView(), + RandomGaussianFunc func_ = RandomGaussianFunc(), + cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid + ): + view(view_), func(func_), fill_mode(fill_mode_) { + + } + + /// Compute random value and update RNG state + void operator()(Coord const &coord) const { + // Fill half of matrix based on FillMode + if (Layout::kRank == 2 && + fill_mode == cutlass::FillMode::kLower && + coord[0] >= coord[1]) { + view.at(coord) = func(); + } else if (Layout::kRank == 2 && + fill_mode == cutlass::FillMode::kUpper && + coord[0] <= coord[1]) { + view.at(coord) = func(); + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a Gaussian distribution. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomGaussian( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double mean = 0, ///< Gaussian distribution's mean + double stddev = 1, ///< Gaussian distribution's standard deviation + int bits = -1, ///< If non-negative, specifies number of fractional bits that + double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of + /// data. + + detail::RandomGaussianFunc random_func(seed, mean, stddev, bits, pnz); + + detail::TensorFillGaussianFunc func( + dst, + random_func + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/// Fills a tensor with random values with a Gaussian distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomGaussian( + TensorViewPlanarComplex dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double mean = 0, ///< Gaussian distribution's mean + double stddev = 1, ///< Gaussian distribution's standard deviation + int bits = -1, ///< If non-negative, specifies number of fractional bits that + double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of + /// data. + + TensorFillRandomGaussian(dst.view_real(), seed, mean, stddev, bits, pnz); + TensorFillRandomGaussian(dst.view_imag(), ~seed, mean, stddev, bits, pnz); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a Gaussian distribution. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillSymmetricRandomGaussian( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices + double mean = 0, ///< Gaussian distribution's mean + double stddev = 1, ///< Gaussian distribution's standard deviation + int bits = -1, ///< If non-negative, specifies number of fractional bits that + double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of + /// data. + + detail::RandomGaussianFunc random_func(seed, mean, stddev, bits, pnz); + + detail::TensorFillSymmetricGaussianFunc func( + dst, + random_func, + fill_mode + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values of a Gaussian distribution. +template < + typename Element ///< Element type +> +void BlockFillRandomGaussian( + Element *ptr, ///< destination buffer + size_t capacity, ///< number of elements + uint64_t seed, ///< seed for RNG + double mean = 0, ///< Gaussian distribution's mean + double stddev = 1, ///< Gaussian distribution's standard deviation + int bits = -1, ///< If non-negative, specifies number of fractional bits that + double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of + /// data. 
+ + + detail::RandomGaussianFunc random_func(seed, mean, stddev, bits, pnz); + + for (size_t i = 0; i < capacity; ++i) { + ReferenceFactory::get(ptr, i) = random_func(); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct RandomUniformFunc { + + using Real = typename RealType::Type; + + uint64_t seed; + double range; + double min; + int int_scale; + + // + // Methods + // + + RandomUniformFunc( + uint64_t seed_ = 0, + double max = 1, + double min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { + std::srand((unsigned)seed); + } + + + /// Compute random value and update RNG state + Element operator()() const { + + double rnd = double(std::rand()) / double(RAND_MAX); + + rnd = min + range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + Element result; + + if (int_scale >= 0) { + rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale); + result = static_cast(Real(rnd)); + } + else { + result = static_cast(Real(rnd)); + } + + return result; + } +}; + +/// Partial specialization for initializing a complex value. 
+template +struct RandomUniformFunc > { + + using Real = typename RealType::Type; + + uint64_t seed; + double range; + double min; + int int_scale; + + // + // Methods + // + + RandomUniformFunc( + uint64_t seed_ = 0, + double max = 1, + double min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { + std::srand((unsigned)seed); + } + + + /// Compute random value and update RNG state + complex operator()() const { + + Element reals[2]; + + for (int i = 0; i < 2; ++i) { + double rnd = double(std::rand()) / double(RAND_MAX); + + rnd = min + range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + + if (int_scale >= 0) { + rnd = double(int(rnd * double(1 << int_scale))); + reals[i] = from_real(Real(rnd / double(1 << int_scale))); + } + else { + reals[i] = from_real(Real(rnd)); + } + } + + return complex(reals[0], reals[1]); + } +}; + +/// Partial specialization for initializing a Quaternion value. 
+template +struct RandomUniformFunc > { + + using Real = typename RealType::Type; + + uint64_t seed; + double range; + double min; + int int_scale; + + // + // Methods + // + + RandomUniformFunc( + uint64_t seed_ = 0, + double max = 1, + double min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { + std::srand((unsigned)seed); + } + + + /// Compute random value and update RNG state + Quaternion operator()() const { + + Element reals[4]; + + for (int i = 0; i < 4; ++i) { + double rnd = double(std::rand()) / double(RAND_MAX); + + rnd = min + range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + + if (int_scale >= 0) { + rnd = double(int(rnd * double(1 << int_scale))); + reals[i] = from_real(Real(rnd / double(1 << int_scale))); + } + else { + reals[i] = from_real(Real(rnd)); + } + } + + return make_Quaternion(reals[0], reals[1], reals[2], reals[3]); + } +}; + +/// Computes a random uniform distribution +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillRandomUniformFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + RandomUniformFunc func; + + // + // Methods + // + + /// Construction of uniform RNG functor. + TensorFillRandomUniformFunc( + TensorView view_ = TensorView(), + RandomUniformFunc func_ = RandomUniformFunc() + ): + view(view_), func(func_) { + + } + + /// Compute random value and update RNG state + void operator()(Coord const &coord) const { + + view.at(coord) = func(); + } +}; + +/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a uniform distribution. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillSymmetricRandomUniformFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + RandomUniformFunc func; + cutlass::FillMode fill_mode; + + // + // Methods + // + + /// Construction of uniform RNG functor. + TensorFillSymmetricRandomUniformFunc( + TensorView view_ = TensorView(), + RandomUniformFunc func_ = RandomUniformFunc(), + cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid + ): + view(view_), func(func_), fill_mode(fill_mode_) { + + } + + /// Compute random value and update RNG state + void operator()(Coord const &coord) const { + // Fill half of matrix based on FillMode + if (Layout::kRank == 2 && + fill_mode == cutlass::FillMode::kLower && + coord[0] >= coord[1]) { + view.at(coord) = func(); + } else if (Layout::kRank == 2 && + fill_mode == cutlass::FillMode::kUpper && + coord[0] <= coord[1]) { + view.at(coord) = func(); + } + } +}; + +/// Computes a random Uniform distribution and pads diagonal with zeros +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillPadDiagonalRandomUniformFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + RandomUniformFunc func; + cutlass::FillMode fill_mode; + int alignment; + + // + // Methods + // + + /// Construction of uniform RNG functor. 
+ TensorFillPadDiagonalRandomUniformFunc( + TensorView view_ = TensorView(), + RandomUniformFunc func_ = RandomUniformFunc(), + cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid, + int alignment_ = 1 + ): + view(view_), func(func_), fill_mode(fill_mode_), alignment(alignment_) { + + } + + /// Compute random value and update RNG state + void operator()(Coord const &coord) const { + // Fill half of matrix based on FillMode + if (Layout::kRank == 2 && + (fill_mode == cutlass::FillMode::kLower) && + (coord[0] >= coord[1]) || + ((coord[1] - coord[0]) >= alignment)) { + view.at(coord) = func(); + } else if (Layout::kRank == 2 && + fill_mode == cutlass::FillMode::kUpper && + (coord[0] <= coord[1]) || + ((coord[0] - coord[1]) >= alignment)) { + view.at(coord) = func(); + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values of a uniform random distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomUniform( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + detail::RandomUniformFunc random_func(seed, max, min, bits); + + detail::TensorFillRandomUniformFunc func( + dst, + random_func + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/// Fills a tensor with random values of a uniform random distribution. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomUniform( + TensorViewPlanarComplex dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + + TensorFillRandomUniform(dst.view_real(), seed, max, min, bits); + TensorFillRandomUniform(dst.view_imag(), ~seed, max, min, bits); +} + + +/// Fills a tensor with random values with a uniform random distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomUniform( + TensorView, Layout> dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + detail::RandomUniformFunc> random_func(seed, max, min, bits); + + detail::TensorFillRandomUniformFunc, Layout> func( + dst, + random_func + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillSymmetricRandomUniform( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + + detail::RandomUniformFunc random_func(seed, max, min, bits); + + detail::TensorFillSymmetricRandomUniformFunc func( + dst, + random_func, + fill_mode + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/// Fills a tensor with random values with a uniform random distribution pads zeros along diagonal +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillPadDiagonalRandomUniform( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1, ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + int alignment = 1 +) { + + detail::RandomUniformFunc random_func(seed, max, min, bits); + + detail::TensorFillPadDiagonalRandomUniformFunc func( + dst, + random_func, + fill_mode, + alignment + ); + + TensorForEach( + dst.extent(), + func + ); +} +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. 
+template < + typename Element ///< Element type +> +void BlockFillRandomUniform( + Element *ptr, + size_t capacity, + uint64_t seed, ///< seed for RNG + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + detail::RandomUniformFunc random_func(seed, max, min, bits); + + for (size_t i = 0; i < capacity; ++i) { + ReferenceFactory::get(ptr, i) = random_func(); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillDiagonalFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + Element diag; + Element other; + + // + // Methods + // + + TensorFillDiagonalFunc( + TensorView const &view_ = TensorView(), + Element diag_ = Element(1), + Element other_ = Element(0) + ): + view(view_), diag(diag_), other(other_) { } + + void operator()(Coord const & coord) const { + bool is_diag = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[i - 1]) { + is_diag = false; + break; + } + } + + view.at(coord) = (is_diag ? diag : other); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor everywhere with a unique value for its diagonal. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillDiagonal( + TensorView dst, ///< destination tensor + Element diag = Element(1), ///< value to write in the diagonal + Element other = Element(0)) { ///< value to write off the diagonal + + detail::TensorFillDiagonalFunc func( + dst, + diag, + other + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper to fill a tensor's diagonal with 1 and 0 everywhere else. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillIdentity( + TensorView dst) { ///< destination tensor + + TensorFillDiagonal(dst, Element(1), Element(0)); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorUpdateDiagonal( + TensorView dst, ///< destination tensor + Element val = Element(1)) { + + typename Layout::Index extent = dst.extent().min(); + + for (typename Layout::Index i = 0; i < extent; ++i) { + Coord coord(i); + dst.at(coord) = val; + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorUpdateOffDiagonalFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + Element other; + + // + // Methods + // + + TensorUpdateOffDiagonalFunc( + TensorView const &view_ = TensorView(), + Element other_ = Element(0) + ): + view(view_), other(other_) { } + + void operator()(Coord const & coord) const { + bool is_diag = true; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + if (coord[i] != coord[i - 1]) { + is_diag = false; + break; + } + } + + if (!is_diag) { + view.at(coord) = other; + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Writes a uniform value to all elements in the tensor without modifying diagonal elements. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorUpdateOffDiagonal( + TensorView dst, ///< destination tensor + Element other = Element(1)) { + + detail::TensorUpdateOffDiagonalFunc func( + dst, + other + ); + + TensorForEach( + dst.extent(), + func + ); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillLinearFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + Array v; + Element s; + + // + // Methods + // + + TensorFillLinearFunc() { } + + /// Constructs functor + TensorFillLinearFunc( + TensorView const &view_, + Array const & v_, + Element s_ = Element(0) + ): + view(view_), v(v_), s(s_) { } + + /// Updates the tensor + void operator()(Coord const & coord) const { + + Element sum(s); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Layout::kRank; ++i) { + sum += Element(coord[i]) * v[i]; + } + + view.at(coord) = sum; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills tensor with a linear combination of its coordinate and another vector +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillLinear( + TensorView dst, ///< destination tensor + Array const & v, + Element s = Element(0)) { + + detail::TensorFillLinearFunc func( + dst, + v, + s + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills tensor with a linear combination of its coordinate and another vector +template < + typename Element, ///< Element type + typename 
Layout> ///< Layout function +void TensorFillSequential( + TensorView dst, ///< destination tensor + Element s = Element(0)) { + + Array stride; + + stride[0] = Element(1); + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < Layout::kRank; ++i) { + stride[i] = stride[i - 1] * Element(dst.extent()[i - 1]); + } + + TensorFillLinear(dst, stride, s); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values from a distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandom( + TensorView view, ///< destination tensor + uint64_t seed, + Distribution dist) { + + using Real = typename RealType::Type; + + if (dist.kind == Distribution::Gaussian) { + TensorFillRandomGaussian( + view, + seed, + static_cast(dist.gaussian.mean), + static_cast(dist.gaussian.stddev), + dist.int_scale); + } else if (dist.kind == Distribution::Uniform) { + TensorFillRandomUniform( + view, + seed, + static_cast(dist.uniform.max), + static_cast(dist.uniform.min), + dist.int_scale); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillSequential( + Element *ptr, + int64_t capacity, + Element v = Element(1), + Element s = Element(0)) { + int i = 0; + + while (i < capacity) { + cutlass::ReferenceFactory::value < + 8)>::get(ptr, i) = s; + + s = Element(s + v); + ++i; + } +} + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillSequentialModN( + Element *ptr, + int64_t capacity, + int64_t mod, + int64_t v = int64_t(1), + int64_t s = int64_t(0)) { + int 
i = 0; + + while (i < capacity) { + cutlass::ReferenceFactory::value < + 8)>::get(ptr, i) = Element(s); + + s = int64_t(s + v) % mod; + ++i; + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillRandom( + Element *ptr, + size_t capacity, + uint64_t seed, + Distribution dist) { + + if (dist.kind == Distribution::Gaussian) { + BlockFillRandomGaussian( + ptr, + capacity, + seed, + dist.gaussian.mean, + dist.gaussian.stddev, + dist.int_scale, + dist.gaussian.pnz); + } + else if (dist.kind == Distribution::Uniform) { + BlockFillRandomUniform( + ptr, + capacity, + seed, + dist.uniform.max, + dist.uniform.min, + dist.int_scale); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct RandomSparseMetaFunc { + + uint64_t seed; + int range; + int MetaSizeInBits; + + // + // Methods + // + + RandomSparseMetaFunc( + uint64_t seed_ = 0, + int MetaSizeInBits_ = 2 + ): + seed(seed_), MetaSizeInBits(MetaSizeInBits_) { + std::srand((unsigned)seed); + if (MetaSizeInBits_ == 2) { + range = 6; + } else if (MetaSizeInBits_ == 4) { + range = 2; + } + } + + /// Compute random value and update RNG state + Element operator()() const { + Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe}; + Element TwoToOneMeta[2] = {0x4, 0xe}; + + Element * MetaArray = (MetaSizeInBits == 2) ? 
FourToTwoMeta : TwoToOneMeta; + + Element result = 0x0; + + for (int i = 0; i < cutlass::sizeof_bits::value / 4; ++i) { + int rnd = std::rand() % range; + Element meta = MetaArray[rnd]; + + result = (Element)(result | ((Element)(meta << (i * 4)))); + } + + return result; + } +}; + +/// Computes a random sparse meta +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +struct TensorFillRandomSparseMetaFunc { + + using TensorView = TensorView; + + // + // Data members + // + + TensorView view; + RandomSparseMetaFunc func; + + // + // Methods + // + + /// Construction of Gaussian RNG functor. + TensorFillRandomSparseMetaFunc( + TensorView view_ = TensorView(), + RandomSparseMetaFunc func_ = RandomSparseMetaFunc() + ): + view(view_), func(func_) { + + } + + /// Compute random value and update RNG state + void operator()(Coord const &coord) const { + + view.at(coord) = func(); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomSparseMeta( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + int MetaSizeInBits) { ///< 2 bit or 4 bit + + detail::RandomSparseMetaFunc random_func(seed, MetaSizeInBits); + + detail::TensorFillRandomSparseMetaFunc func( + dst, + random_func + ); + + TensorForEach( + dst.extent(), + func + ); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. 
+template < + typename Element ///< Element type +> +void BlockFillRandomSparseMeta( + Element *ptr, + size_t capacity, + uint64_t seed, ///< seed for RNG + int MetaSizeInBits) { ///< 2 bit or 4bit + + detail::RandomSparseMetaFunc random_func(seed, MetaSizeInBits); + + for (size_t i = 0; i < capacity; ++i) { + ptr[i] = random_func(); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a ell block index matrix with random values with a uniform random distribution. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorFillRandomEllIdx( + TensorView dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + int rows, int ell_cols, int cols) { ///< dimension of the matrix + + std::srand((unsigned)seed); + + for (int i = 0; i < rows; ++i) { + int col_idx = std::rand() % cols; + + for (int j = 0; j < ell_cols; ++j) { + dst.at({i, j}) = col_idx; + + if (col_idx != -1) { + if (col_idx == (cols - 1)) { + col_idx = -1; + } else { + col_idx = std::rand() % (cols - col_idx - 1) + col_idx + 1; + } + } + } + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies a diagonal in from host memory without modifying off-diagonal elements. 
+template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorCopyDiagonalIn( + TensorView dst, ///< destination tensor + Element const *ptr) { ///< dense buffer of elements + + typename Layout::Index extent = dst.extent().min(); + + for (typename Layout::Index i = 0; i < extent; ++i) { + Coord coord(i); + dst.at(coord) = ReferenceFactory::get(ptr, i); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Copies the diagonal of a tensor into a dense buffer in host memory. +template < + typename Element, ///< Element type + typename Layout> ///< Layout function +void TensorCopyDiagonalOut( + Element *ptr, ///< dense buffer of elements + TensorView src) { ///< source tensor + + typename Layout::Index extent = src.extent().min(); + + for (typename Layout::Index i = 0; i < extent; ++i) { + Coord coord(i); + ReferenceFactory::get(ptr, i) = src.at(coord); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3262c535275c7a4199576760b0d1e1c9781edea3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.hpp @@ -0,0 +1,432 @@ +/*************************************************************************************************** + * Copyright (c) 
2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Provides several functions for filling tensors with data. 
+*/ + +#pragma once + +// Standard Library includes +#include +#include +#include + +// Cute includes +#include "cute/tensor.hpp" + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/quaternion.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Uniform and procedural tensor fills +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with a scalar element +template +void TensorFill(Tensor dst, typename Tensor::value_type element) { + + for (int64_t idx = 0; idx < cute::size(dst); ++idx) { + dst(idx) = element; + } +} + +/// Fills a tensor with the contents of its layout +template +void TensorFillSequential(Tensor dst) { + + auto layout = dst.layout(); + + for (int64_t idx = 0; idx < cute::size(dst); ++idx) { + dst(idx) = layout(idx); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Random uniform values +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct RandomUniformFunc { + + using Real = typename RealType::Type; + + uint64_t seed; + double range; + double min; + int int_scale; + + // + // Methods + // + + RandomUniformFunc( + uint64_t seed_ = 0, + double max = 1, + double min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { + std::srand((unsigned)seed); + } + + + /// Compute random value and update RNG state + Element operator()() const { + + double rnd = double(std::rand()) / double(RAND_MAX); + + rnd = min + range * rnd; + + // Random 
values are cast to integer after scaling by a power of two to facilitate error + // testing + Element result; + + if (int_scale >= 0) { + rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale); + result = static_cast(Real(rnd)); + } + else { + result = static_cast(Real(rnd)); + } + + return result; + } +}; + +/// Partial specialization for initializing a complex value. +template +struct RandomUniformFunc > { + + using Real = typename RealType::Type; + + uint64_t seed; + double range; + double min; + int int_scale; + + // + // Methods + // + + RandomUniformFunc( + uint64_t seed_ = 0, + double max = 1, + double min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { + std::srand((unsigned)seed); + } + + + /// Compute random value and update RNG state + complex operator()() const { + + Element reals[2]; + + for (int i = 0; i < 2; ++i) { + double rnd = double(std::rand()) / double(RAND_MAX); + + rnd = min + range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + + if (int_scale >= 0) { + rnd = double(int(rnd * double(1 << int_scale))); + reals[i] = from_real(Real(rnd / double(1 << int_scale))); + } + else { + reals[i] = from_real(Real(rnd)); + } + } + + return complex(reals[0], reals[1]); + } +}; + +/// Partial specialization for initializing a Quaternion value. 
+template +struct RandomUniformFunc > { + + using Real = typename RealType::Type; + + uint64_t seed; + double range; + double min; + int int_scale; + + // + // Methods + // + + RandomUniformFunc( + uint64_t seed_ = 0, + double max = 1, + double min_ = 0, + int int_scale_ = -1 + ): + seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { + std::srand((unsigned)seed); + } + + + /// Compute random value and update RNG state + Quaternion operator()() const { + + Element reals[4]; + + for (int i = 0; i < 4; ++i) { + double rnd = double(std::rand()) / double(RAND_MAX); + + rnd = min + range * rnd; + + // Random values are cast to integer after scaling by a power of two to facilitate error + // testing + + if (int_scale >= 0) { + rnd = double(int(rnd * double(1 << int_scale))); + reals[i] = from_real(Real(rnd / double(1 << int_scale))); + } + else { + reals[i] = from_real(Real(rnd)); + } + } + + return make_Quaternion(reals[0], reals[1], reals[2], reals[3]); + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a tensor with random values with a uniform random distribution. +template ///< Tensor object +void TensorFillRandomUniform( + Tensor dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + + detail::RandomUniformFunc random_func(seed, max, min, bits); + + for (int64_t idx = 0; idx < cute::size(dst); ++idx) { + dst(idx) = random_func(); + } +} + +/// Fills a block with random values with a uniform random distribution. 
+template < + typename Element ///< Element type +> +void BlockFillRandomUniform( + Element *ptr, + size_t capacity, + uint64_t seed, ///< seed for RNG + double max = 1, ///< upper bound of distribution + double min = 0, ///< lower bound for distribution + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + detail::RandomUniformFunc random_func(seed, max, min, bits); + + for (size_t i = 0; i < capacity; ++i) { + ptr[i] = random_func(); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Random Gaussian +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct RandomGaussianFunc { + + uint64_t seed; + double mean; + double stddev; + int int_scale; + double pi; + + // + // Methods + // + RandomGaussianFunc( + uint64_t seed_ = 0, + double mean_ = 0, + double stddev_ = 1, + int int_scale_ = -1 + ): + seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)) { + std::srand((unsigned)seed); + } + + /// Compute random value and update RNG state + Element operator()() const { + + // Box-Muller transform to generate random numbers with Normal distribution + double u1 = double(std::rand()) / double(RAND_MAX); + double u2 = double(std::rand()) / double(RAND_MAX); + + // Compute Gaussian random value + double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2); + rnd = mean + stddev * rnd; + + // Scale and convert final result + Element result; + + if (int_scale >= 0) { + rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale); + result = static_cast(rnd); + } + else { + result = static_cast(rnd); + } + + return result; + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a 
tensor with random values with a Gaussian distribution. +template < + typename Tensor +> +void TensorFillRandomGaussian( + Tensor dst, ///< destination tensor + uint64_t seed, ///< seed for RNG + double mean = 0, ///< Gaussian distribution's mean + double stddev = 1, ///< Gaussian distribution's standard deviation + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. + + detail::RandomGaussianFunc random_func(seed, mean, stddev, bits); + + for (int64_t idx = 0; idx < cute::size(dst); ++idx) { + dst(idx) = random_func(); + } +} + +/// Fills a block with random values with a Gaussian distribution. +template < + typename Element ///< Element type +> +void BlockFillRandomGaussian( + Element *ptr, ///< destination buffer + size_t capacity, ///< number of elements + uint64_t seed, ///< seed for RNG + double mean = 0, ///< Gaussian distribution's mean + double stddev = 1, ///< Gaussian distribution's standard deviation + int bits = -1) { ///< If non-negative, specifies number of fractional bits that + /// are not truncated to zero. Permits reducing precision of + /// data. 
+ + detail::RandomGaussianFunc random_func(seed, mean, stddev, bits); + + for (size_t i = 0; i < capacity; ++i) { + ptr[i] = random_func(); + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillSequential( + Element *ptr, + int64_t capacity, + Element v = Element(1), + Element s = Element(0)) { + int i = 0; + + while (i < capacity) { + + ptr[i] = Element(s + v); + ++i; + } +} + +/// Fills a block of data with sequential elements +template < + typename Element +> +void BlockFillSequentialModN( + Element *ptr, + int64_t capacity, + int64_t mod, + int64_t v = int64_t(1), + int64_t s = int64_t(0)) { + int i = 0; + + while (i < capacity) { + + ptr[i] = static_cast(int32_t(int64_t(s + v) % mod)); + ++i; + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h new file mode 100644 index 0000000000000000000000000000000000000000..68a36d86f07a9fccc2185dd7911f4506fc39f8fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h @@ -0,0 +1,134 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include +#include "cutlass/cutlass.h" + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Defines several helpers +namespace detail { + +/// Helper to perform for-each operation +template +struct TensorForEachHelper { + + /// Index of the active rank + static int const kActiveRank = Rank - RankRemaining - 1; + + /// Constructor for general rank + TensorForEachHelper( + Func &func, + Coord const &extent, + Coord &coord) { + + for (int i = 0; i < extent.at(kActiveRank); ++i) { + coord[kActiveRank] = i; + TensorForEachHelper(func, extent, coord); + } + } +}; + +/// Helper to perform for-each operation +template +struct TensorForEachHelper { + + /// Index of the active rank + static int const kActiveRank = Rank - 1; + + /// Constructor for fastest changing rank + TensorForEachHelper( + Func &func, + Coord const &extent, + Coord &coord) { + + for (int i = 0; i < extent.at(kActiveRank); ++i) { + coord[kActiveRank] = i; + func(coord); + } + } +}; + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Iterates over the index space of a tensor +template < + typename Func, ///< function applied to each point in a tensor's index space + int Rank> ///< rank of index space +void TensorForEach(Coord extent, Func & func) { + Coord coord; + detail::TensorForEachHelper(func, extent, coord); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Iterates over the index space of a tensor and calls a C++ lambda +template < + typename Func, ///< function applied to each point in a tensor's index space + int Rank> ///< rank of index space +void TensorForEachLambda(Coord extent, Func func) { + Coord coord; + 
detail::TensorForEachHelper(func, extent, coord); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct BlockForEach { + + /// Constructor performs the operation. + BlockForEach( + Element *ptr, + size_t capacity, + typename Func::Params params = typename Func::Params()) { + + Func func(params); + + for (size_t index = 0; index < capacity; ++index) { + ptr[index] = func(); + } + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_norm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..9d52b08bce3e8e0de802802cd98ffeb3af639163 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_norm.h @@ -0,0 +1,42 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + + +#include "cutlass/cutlass.h" + +// The contents of this file have been moved to 'tensor_reduce' to cover other types of reductions. 
+ +#include "cutlass/util/reference/host/tensor_reduce.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..672e4d57948413519b2a033ae6bbf7e4b42985a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h @@ -0,0 +1,203 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/util/reference/detail/linear_to_coordinate.h" +#include "cutlass/core_io.h" + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side +/// workspace +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorView view, + ComputeType identity, + ReduceOp reduce, + TransformOp transform +) { + + for (int64_t idx = 0; idx < view.size(); ++idx) { + typename Layout::TensorCoord coord; + cutlass::reference::detail::LinearToCoordinate()(coord, idx, view.extent()); + + if (view.contains(coord)) { + Element x = view.at(coord); + identity = reduce(identity, transform(x)); + } + } + + return identity; +} + +/// Transform-reduce operation over the elements of a tensor. 
This helper allocates the device-side +/// workspace +template < + typename Element, + typename Layout, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorView view_A, + TensorView view_B, + ComputeType identity, + ReduceOp reduce, + TransformOp transform) { + + if (view_A.extent() != view_B.extent()) { + throw std::runtime_error("Tensor extents must match."); + } + + for (int64_t idx = 0; idx < view_A.size(); ++idx) { + + typename Layout::TensorCoord coord; + cutlass::reference::detail::LinearToCoordinate()(coord, idx, view_A.extent()); + + if (view_A.contains(coord)) { + Element a = view_A.at(coord); + Element b = view_B.at(coord); + identity = reduce(identity, transform(a, b)); + } + } + + return identity; +} + +/// Helper to compute the sum of the elements of a tensor +template < + typename Element, + typename Layout, + typename ComputeType = Element +> +ComputeType TensorSum( + TensorView view, + ComputeType identity = ComputeType() +) { + + plus reduce; + NumericConverter transform; + + return TensorTransformReduce( + view, identity, reduce, transform); +} + +/// Helper to compute the sum of the squares of the elements of a tensor +template < + typename Element, + typename Layout, + typename ComputeType = Element +> +ComputeType TensorSumSq( + TensorView view, + ComputeType identity = ComputeType() +) { + + plus reduce; + magnitude_squared transform; + + return TensorTransformReduce( + view, identity, reduce, transform); +} + +/// Helper to compute the norm of the elements of a tensor. 
+template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorNorm( + TensorView view, + ComputeType identity = ComputeType() +) { + + return std::sqrt(TensorSumSq(view, identity)); +} + +/// Helper to compute the sum of the squares of the differences of two tensors +template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorSumSqDiff( + TensorView view_A, + TensorView view_B, + ComputeType identity = ComputeType() +) { + + plus reduce; + magnitude_squared_difference transform; + + return TensorTransformReduce( + view_A, view_B, identity, reduce, transform); +} + + +/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory +template < + typename Element, + typename Layout, + typename ComputeType = double +> +ComputeType TensorNormDiff( + TensorView view_A, + TensorView view_B, + ComputeType identity = ComputeType() +) { + + return std::sqrt(TensorSumSqDiff(view_A, view_B, identity)); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.hpp new file mode 100644 index 0000000000000000000000000000000000000000..aadf60ac7eb6cbce3d309d0cea980917169d7d09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.hpp @@ -0,0 +1,203 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Provides several functions for filling tensors with data. 
+*/ + +#pragma once + +// Standard Library includes +#include +#include +#include + +// Cute includes +#include "cute/tensor.hpp" + +// Cutlass includes +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/quaternion.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reference { +namespace host { + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Tensor reductions +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side +/// workspace +template < + typename Tensor, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + Tensor view, + ComputeType identity, + ReduceOp reduce, + TransformOp transform +) { + + for (int64_t idx = 0; idx < cute::size(view); ++idx) { + identity = reduce(identity, transform(view(idx))); + } + + return identity; +} + +/// Transform-reduce operation over the elements of a tensor. 
This helper allocates the device-side +/// workspace +template < + typename TensorA, + typename TensorB, + typename ComputeType, + typename ReduceOp, + typename TransformOp +> +ComputeType TensorTransformReduce( + TensorA view_A, + TensorB view_B, + ComputeType identity, + ReduceOp reduce, + TransformOp transform) { + + if (cute::size(view_A) != cute::size(view_B)) { + throw std::runtime_error("Tensor sizes must match."); + } + + for (int64_t idx = 0; idx < cute::size(view_A); ++idx) { + identity = reduce(identity, transform(view_A(idx), view_B(idx))); + } + + return identity; +} + +/// Helper to compute the sum of the elements of a tensor +template < + typename Tensor, + typename ComputeType = typename Tensor::value_type +> +ComputeType TensorSum( + Tensor view, + ComputeType identity = ComputeType() +) { + + plus reduce; + NumericConverter transform; + + return TensorTransformReduce( + view, identity, reduce, transform); +} + +/// Helper to compute the sum of the squares of the elements of a tensor +template < + typename Tensor, + typename ComputeType = typename Tensor::value_type +> +ComputeType TensorSumSq( + Tensor view, + ComputeType identity = ComputeType() +) { + + plus reduce; + magnitude_squared transform; + + return TensorTransformReduce( + view, identity, reduce, transform); +} + +/// Helper to compute the norm of the elements of a tensor. 
+template < + typename Tensor, + typename ComputeType = double +> +ComputeType TensorNorm( + Tensor view, + ComputeType identity = ComputeType() +) { + + return std::sqrt(TensorSumSq(view, identity)); +} + +/// Helper to compute the sum of the squares of the differences of two tensors +template < + typename TensorA, + typename TensorB, + typename ComputeType = double +> +ComputeType TensorSumSqDiff( + TensorA view_A, + TensorB view_B, + ComputeType identity = ComputeType() +) { + + plus reduce; + magnitude_squared_difference transform; + + return TensorTransformReduce( + view_A, view_B, identity, reduce, transform); +} + + +/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory +template < + typename TensorA, + typename TensorB, + typename ComputeType = double +> +ComputeType TensorNormDiff( + TensorA view_A, + TensorB view_B, + ComputeType identity = ComputeType() +) { + + return std::sqrt(TensorSumSqDiff(view_A, view_B, identity)); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm.h new file mode 100644 index 0000000000000000000000000000000000000000..0c931ee76e86741526ca61481828e722033a611c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm.h @@ -0,0 +1,215 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for TRMM in host-side code. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/arch/mma.h" +#include "cutlass/util/host_tensor.h" + +#include "cutlass/util/reference/host/gemm.h" + +namespace cutlass { +namespace reference { +namespace host { + +/// Computes a Triangular Matrix Multiplication (tensors of rank=2) pointed to by TensorRef +/// objects. +template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + DiagType DiagTypeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_trmm( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + TensorRef tensor_d, + ComputeType initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + static_assert(SideModeA != SideMode::kInvalid + , "Side Mode can either be Left or Right."); + + static_assert(FillModeA == FillMode::kLower || FillModeA == FillMode::kUpper + , "Fill Mode can either be Lower or Upper."); + + using CompareOp = typename TrMatrixCompareOp::Type; + + // Note: batch is ignored. 
+ int const M = problem_size.m(); + int const N = problem_size.n(); + // Assuming correct k-dimension value is passed + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + CompareOp compare_op; + + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + ElementA a = ElementA(); + ElementB b = ElementB(); + + if (SideModeA == SideMode::kLeft) { + a = (compare_op(row, k_block)) ? + (tensor_a.at(MatrixCoord(row, k_block))) : ElementA(0); + if (row == k_block && DiagTypeA == DiagType::kUnit) { + a = ElementA(1); + } + b = tensor_b.at(MatrixCoord(k_block, col)); + } else if (SideModeA == SideMode::kRight) { + a = tensor_b.at(MatrixCoord(row, k_block)); + b = (compare_op(k_block, col)) ? 
+ tensor_a.at(MatrixCoord(k_block, col)) : ElementA(0); + if (k_block == col && DiagTypeA == DiagType::kUnit) { + b = ElementA(1); + } + } + + ComputeType compute_a(cast_if_scalar(a)); + ComputeType compute_b(cast_if_scalar(b)); + + accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j])); + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA, + typename LayoutA, + SideMode SideModeA, + FillMode FillModeA, + DiagType DiagTypeA, + typename ElementB, + typename LayoutB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = cutlass::arch::OpMultiplyAdd +> +struct Trmm; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct Trmm { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_trmm>( + problem_size, alpha, tensor_a, tensor_b, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm_complex.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..455c8a92ec8c020006c18e6564ef91d9fef9078c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm_complex.h @@ -0,0 +1,262 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Reference implementation for complex-valued TRMM in host-side code. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/tensor_view.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/util/reference/host/gemm.h" + +namespace cutlass { +namespace reference { +namespace host { + +/// Computes a Triangular Matrix Multiplication (tensors of rank=2) pointed to by TensorRef +/// objects. 
+template < + typename ElementA, + typename LayoutA, + ComplexTransform TransformA, + SideMode SideModeA, + FillMode FillModeA, + DiagType DiagTypeA, + typename ElementB, + typename LayoutB, + ComplexTransform TransformB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = multiply_add, + typename ConvertOp = NumericConverter +> +void compute_trmm_complex( + gemm::GemmCoord problem_size, + ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + TensorRef tensor_d, + ComputeType initial_accum) { + + static_assert( + LayoutA::kRank == 2 && + LayoutC::kRank == 2, "Tensors must be of rank 2"); + + static_assert(SideModeA != SideMode::kInvalid + , "Side Mode can either be Left or Right."); + + static_assert(FillModeA == FillMode::kLower || FillModeA == FillMode::kUpper + , "Fill Mode can either be Lower or Upper."); + + using CompareOp = typename TrMatrixCompareOp::Type; + + // Note: batch is ignored. + int const M = problem_size.m(); + int const N = problem_size.n(); + // Assuming correct k-dimension value is passed + int const K = problem_size.k(); + + // Blocking necessary to speedup reference implementation + int const Mblock = 16; + int const Nblock = 16; + + ConvertOp convert_op; + InnerProductOp inner_product_op; + CompareOp compare_op; + + for (int row_block = 0; row_block < M; row_block += Mblock) { + for (int col_block = 0; col_block < N; col_block += Nblock) { + + ComputeType accum[Mblock][Nblock]; + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + accum[i][j] = initial_accum; + } + } + + for (int k_block = 0; k_block < K; ++k_block) { + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + if (row < M && col < N) { + ElementA a = ElementA(); + ElementB b = ElementB(); + + if (SideModeA == SideMode::kLeft) { + a = (compare_op(row, k_block)) ? 
+ (tensor_a.at(MatrixCoord(row, k_block))) : ElementA(0); + if (row == k_block && DiagTypeA == DiagType::kUnit) { + a = ElementA(1); + } + b = tensor_b.at(MatrixCoord(k_block, col)); + } else if (SideModeA == SideMode::kRight) { + a = tensor_b.at(MatrixCoord(row, k_block)); + b = (compare_op(k_block, col)) ? + tensor_a.at(MatrixCoord(k_block, col)) : ElementA(0); + if (k_block == col && DiagTypeA == DiagType::kUnit) { + b = ElementA(1); + } + } + + ComputeType a_ik = ComputeType(a); + ComputeType b_kj = ComputeType(b); + + // Conjugate, and hence hermitian, is only allowed for the triangular matrix + if (SideModeA == SideMode::kLeft && TransformA == ComplexTransform::kConjugate) { + a_ik = conj(a_ik); + } else if (SideModeA == SideMode::kRight && TransformA == ComplexTransform::kConjugate) { + b_kj = conj(b_kj); + } + + accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]); + } + } + } + } + + for (int j = 0; j < Nblock; j++) { + for (int i = 0; i < Mblock; i++) { + int row = row_block + i; + int col = col_block + j; + + MatrixCoord coord = MatrixCoord(row, col); + + if (row < M && col < N) { + tensor_d.at(coord) = convert_op( + alpha * ScalarType(accum[i][j])); + } + } + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA, + typename LayoutA, + ComplexTransform TransformA, + SideMode SideModeA, + FillMode FillModeA, + DiagType DiagTypeA, + typename ElementB, + typename LayoutB, + ComplexTransform TransformB, + typename ElementC, + typename LayoutC, + typename ScalarType, + typename ComputeType, + typename InnerProductOp = cutlass::arch::OpMultiplyAddComplex +> +struct TrmmComplex; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiply-add +template +struct TrmmComplex { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef 
tensor_b, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_trmm_complex>( + problem_size, alpha, tensor_a, tensor_b, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for gaussian multiply-add +template +struct TrmmComplex { + + void operator()(gemm::GemmCoord problem_size, ScalarType alpha, + TensorRef tensor_a, + TensorRef tensor_b, + TensorRef tensor_d, + ComputeType initial_accum = ComputeType(0)) { + static_assert( + LayoutA::kRank == 2 && LayoutC::kRank == 2, + "Tensors must be of rank 2"); + + compute_trmm_complex>( + problem_size, alpha, tensor_a, tensor_b, tensor_d, initial_accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace host +} // namespace reference +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/tensor_view_io.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/tensor_view_io.h new file mode 100644 index 0000000000000000000000000000000000000000..51e47b92de88d9e0139a4242b2c0070f3725cfe8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/tensor_view_io.h @@ -0,0 +1,270 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+* +**************************************************************************************************/ +#pragma once + +#include "cutlass/core_io.h" +#include "cutlass/tensor_view.h" +#include "cutlass/tensor_view_planar_complex.h" +#include "cutlass/complex.h" + +namespace cutlass { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Helper to write the least significant rank of a TensorView +template < + typename Element, + typename Layout +> +inline std::ostream & TensorView_WriteLeastSignificantRank( + std::ostream& out, + TensorView const& view, + Coord const &start_coord, + int rank, + std::streamsize width) { + + for (int idx = 0; idx < view.extent(rank); ++idx) { + + Coord coord(start_coord); + coord[rank] = idx; + + if (idx) { + out.width(0); + out << ", "; + } + if (idx || coord) { + out.width(width); + } + out << ScalarIO(view.at(coord)); + } + + return out; +} + +/// Helper to write a rank of a TensorView +template < + typename Element, + typename Layout +> +inline std::ostream & TensorView_WriteRank( + std::ostream& out, + TensorView const& view, + Coord const &start_coord, + int rank, + std::streamsize width) { + + // If called on the least significant rank, write the result as a row + if (rank + 1 == Layout::kRank) { + return TensorView_WriteLeastSignificantRank(out, view, start_coord, rank, width); + } + + // Otherwise, write a sequence of rows and newlines + for (int idx = 0; idx < view.extent(rank); ++idx) { + + Coord coord(start_coord); + coord[rank] = idx; + + if (rank + 2 == Layout::kRank) { + // Write least significant ranks asa matrix with rows delimited by "\n" + if (idx) { + out << ",\n"; + } + TensorView_WriteLeastSignificantRank(out, view, coord, rank + 1, width); + } + else { + // Higher ranks are separated by newlines + if (idx) { + out << ",\n\n"; + } + TensorView_WriteRank(out, view, coord, rank + 1, width); + } + } + + return out; +} + +/// Helper to 
write the least significant rank of a TensorView +template < + typename Element, + typename Layout +> +inline std::ostream & TensorViewPlanarComplex_WriteLeastSignificantRank( + std::ostream& out, + TensorViewPlanarComplex const& view, + Coord const &start_coord, + int rank, + std::streamsize width) { + + for (int idx = 0; idx < view.extent(rank); ++idx) { + + Coord coord(start_coord); + coord[rank] = idx; + + if (idx) { + out.width(0); + out << ", "; + } + if (idx || coord) { + out.width(width); + } + + complex x = view.at(coord); + out << x; + } + + return out; +} + +/// Helper to write a rank of a TensorView +template < + typename Element, + typename Layout +> +inline std::ostream & TensorViewPlanarComplex_WriteRank( + std::ostream& out, + TensorViewPlanarComplex const& view, + Coord const &start_coord, + int rank, + std::streamsize width) { + + // If called on the least significant rank, write the result as a row + if (rank + 1 == Layout::kRank) { + return TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, start_coord, rank, width); + } + + // Otherwise, write a sequence of rows and newlines + for (int idx = 0; idx < view.extent(rank); ++idx) { + + Coord coord(start_coord); + coord[rank] = idx; + + if (rank + 2 == Layout::kRank) { + // Write least significant ranks asa matrix with rows delimited by ";\n" + if (idx) { + out << ";\n"; + } + TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, coord, rank + 1, width); + } + else { + // Higher ranks are separated by newlines + if (idx) { + out << "\n"; + } + TensorViewPlanarComplex_WriteRank(out, view, coord, rank + 1, width); + } + } + + return out; +} + +} // namespace detail + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Prints human-readable representation of a TensorView to an ostream +template < + typename Element, + typename Layout +> +inline std::ostream& TensorViewWrite( + std::ostream& out, + TensorView const& view) { + + // 
Prints a TensorView according to the following conventions: + // - least significant rank is printed as rows separated by ";\n" + // - all greater ranks are delimited with newlines + // + // The result is effectively a whitespace-delimited series of 2D matrices. + + return detail::TensorView_WriteRank(out, view, Coord(), 0, out.width()); +} + +/// Prints human-readable representation of a TensorView to an ostream +template < + typename Element, + typename Layout +> +inline std::ostream& operator<<( + std::ostream& out, + TensorView const& view) { + + // Prints a TensorView according to the following conventions: + // - least significant rank is printed as rows separated by ";\n" + // - all greater ranks are delimited with newlines + // + // The result is effectively a whitespace-delimited series of 2D matrices. + + return TensorViewWrite(out, view); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Prints human-readable representation of a TensorView to an ostream +template < + typename Element, + typename Layout +> +inline std::ostream& TensorViewWrite( + std::ostream& out, + TensorViewPlanarComplex const& view) { + + // Prints a TensorView according to the following conventions: + // - least significant rank is printed as rows separated by ";\n" + // - all greater ranks are delimited with newlines + // + // The result is effectively a whitespace-delimited series of 2D matrices. 
+ + return detail::TensorViewPlanarComplex_WriteRank(out, view, Coord(), 0, out.width()); +} + +/// Prints human-readable representation of a TensorView to an ostream +template < + typename Element, + typename Layout +> +inline std::ostream& operator<<( + std::ostream& out, + TensorViewPlanarComplex const& view) { + + // Prints a TensorView according to the following conventions: + // - least significant rank is printed as rows separated by ";\n" + // - all greater ranks are delimited with newlines + // + // The result is effectively a whitespace-delimited series of 2D matrices. + + return TensorViewWrite(out, view); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/type_traits.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..f187b978f97137d6e9b61b538cc50f43e400ec93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/tools/util/include/cutlass/util/type_traits.h @@ -0,0 +1,238 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Type traits for common CUDA types +*/ + +#pragma once + +#include +#include +#include + +#include "cutlass/numeric_types.h" +#include "cutlass/complex.h" + +namespace cutlass { +struct half_t; + +template +struct TypeTraits { + typedef T host_type; + typedef T device_type; + static inline T remove_negative_zero(T x) { return x; } + static inline T to_print(T x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_8I; + typedef int8_t host_type; + typedef int8_t device_type; + typedef int8_t integer_type; + typedef uint8_t unsigned_type; + static inline int8_t remove_negative_zero(int8_t x) { return x; } + static inline int to_print(int8_t x) { return (int)x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_8I; + typedef uint8_t host_type; + typedef uint8_t device_type; + typedef uint8_t integer_type; + typedef uint8_t unsigned_type; + static inline uint8_t remove_negative_zero(uint8_t x) { return x; } + static inline uint32_t to_print(uint8_t x) { return (uint32_t)x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_32I; + typedef int host_type; + typedef int device_type; + typedef int32_t integer_type; + typedef uint32_t unsigned_type; + static inline int32_t remove_negative_zero(int32_t x) { return x; } + static inline int to_print(int x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_32I; + typedef unsigned host_type; + typedef unsigned device_type; + typedef uint32_t integer_type; + typedef uint32_t unsigned_type; + static inline uint32_t remove_negative_zero(uint32_t x) { return x; } + static 
inline uint32_t to_print(uint32_t x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_8I; + typedef int64_t host_type; + typedef int64_t device_type; + typedef int64_t integer_type; + typedef uint64_t unsigned_type; + static inline int64_t remove_negative_zero(int64_t x) { return x; } + static inline int64_t to_print(int64_t x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_8I; + typedef uint64_t host_type; + typedef uint64_t device_type; + typedef uint64_t integer_type; + typedef uint64_t unsigned_type; + static inline uint64_t remove_negative_zero(uint64_t x) { return x; } + static inline uint64_t to_print(uint64_t x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_16F; + typedef half_t host_type; + typedef half_t device_type; + typedef int16_t integer_type; + typedef uint16_t unsigned_type; + static inline half_t remove_negative_zero(half_t x) { + return (x.raw() == 0x8000 ? half_t::bitcast(0) : x); + } + static inline half_t to_print(half_t x) { return x; } + static inline device_type to_device(half_t x) { return reinterpret_cast(x); } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_32F; + typedef float host_type; + typedef float device_type; + typedef int32_t integer_type; + typedef uint32_t unsigned_type; + static inline float remove_negative_zero(float x) { return x == -0.f ? 
0.f : x; } + static inline float to_print(float x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +template <> +struct TypeTraits { + static cudaDataType_t const cublas_type = CUDA_R_64F; + typedef double host_type; + typedef double device_type; + typedef int64_t integer_type; + typedef uint64_t unsigned_type; + static inline double remove_negative_zero(double x) { return x == -0.0 ? 0.0 : x; } + static inline double to_print(double x) { return x; } + static inline device_type to_device(host_type x) { return x; } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Complex types +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template <> +struct TypeTraits > { + static cudaDataType_t const cublas_type = CUDA_C_16F; + typedef complex host_type; + typedef complex device_type; + typedef int16_t integer_type; + typedef uint16_t unsigned_type; + static inline device_type to_device(complex x) { return reinterpret_cast(x); } +}; + +template <> +struct TypeTraits > { + static cudaDataType_t const cublas_type = CUDA_C_16F; + typedef complex host_type; + typedef complex device_type; + typedef int16_t integer_type; + typedef uint16_t unsigned_type; + static inline complex remove_negative_zero(complex x) { + return complex( + real(x) == -0_hf ? 0_hf : real(x), + imag(x) == -0_hf ? 0_hf : imag(x) + ); + } + static inline complex to_print(complex x) { return x; } + static inline device_type to_device(complex x) { return reinterpret_cast(x); } +}; + +template <> +struct TypeTraits > { + + static cudaDataType_t const cublas_type = CUDA_C_32F; + typedef complex host_type; + typedef complex device_type; + typedef int64_t integer_type; + typedef uint64_t unsigned_type; + + static inline complex remove_negative_zero(complex x) { + return complex( + real(x) == -0.f ? 0.f : real(x), + imag(x) == -0.f ? 
0.f : imag(x) + ); + } + + static inline complex to_print(complex x) { return x; } + static inline device_type to_device(complex x) { return reinterpret_cast(x); } +}; + +template <> +struct TypeTraits > { + static cudaDataType_t const cublas_type = CUDA_C_64F; + typedef complex host_type; + typedef complex device_type; + struct integer_type { int64_t real, imag; }; + struct unsigned_type { uint64_t real, imag; }; + static inline complex remove_negative_zero(complex x) { + return complex( + real(x) == -0.0 ? 0.0 : real(x), + imag(x) == -0.0 ? 0.0 : imag(x) + ); + } + static inline complex to_print(complex x) { return x; } + static inline device_type to_device(complex x) { return reinterpret_cast(x); } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/CONTRIBUTORS.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/CONTRIBUTORS.md new file mode 100644 index 0000000000000000000000000000000000000000..eff9862a8deb68f8a0aba03347d9f84562ca382d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/CONTRIBUTORS.md @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + + +TVM Contributors +================ +TVM adopts the Apache way and governs by merit. We believe that it is important to create an inclusive community where everyone can use, +contribute to, and influence the direction of the project. We actively invite contributors who have earned the merit to be part of the development community. + +See the [community structure document](https://tvm.apache.org/docs/contribute/community.html) for the explanation of community structure and contribution guidelines. + + +## Committers + +We add tag along with committer name to show areas that they are familiar with. +We do encourage everyone to work anything they are interested in. 
+ +- [Aditya Atluri](https://github.com/adityaatluri): @adityaatluri - rocm +- [Matthew Barrett](https://github.com/mbaret): @mbaret - byoc, arm +- [Matthew Brookhart](https://github.com/mbrookhart): @mbrookhart - relay, frontends +- [Yaxing Cai](https://github.com/cyx-6): @cyx-6 - tvm-script, runtime +- [Liangfu Chen](https://github.com/liangfu): @liangfu - vta, chisel, intel FPGA, c runtime +- [Tianqi Chen](https://github.com/tqchen) (PMC): @tqchen - topi, compiler, relay, docs +- [Wei Chen](https://github.com/wweic): @wweic - runtime, relay, vm +- [Zhi Chen](https://github.com/zhiics) (PMC): @zhiics - relay, quantization, pass manager +- [Egor Churaev](https://github.com/echuraev): @echuraev - metal, opencl, adreno +- [Siyuan Feng](https://github.com/Hzfengsy) (PMC): @Hzfengsy - tir +- [Josh Fromm](https://github.com/jwfromm) (PMC): @jwfromm - frontends, quantization, topi +- [Mehrdad Hessar](https://github.com/mehrdadh): @mehrdadh - microTVM, hexagon +- [Bohan Hou](https://github.com/spectrometerHBH) (PMC): @spectrometerHBH - tir, arith, tvm-script +- [Yuwei Hu](https://github.com/Huyuwei): @Huyuwei - topi, frontends +- [Luke Hutton](https://github.com/lhutton1): @lhutton1 - ethos-u, arm +- [Nick Hynes](https://github.com/nhynes): @nhynes: - sgx, rust +- [Animesh Jain](https://github.com/anijain2305): @anijain2305 - quantization, relay +- [Chenfan Jia](https://github.com/jcf94): @jcf94 - auto_scheduler +- [Ziheng Jiang](https://github.com/ZihengJiang) (PMC): @ZihengJiang - relay, compiler +- [Hongyi Jin](https://github.com/jinhongyii): @jinhongyii - tir, tvm-script, arith, relay, topi +- [Manupa Karunaratne](https://github.com/manupak): @manupak - ethos-u, memory planner +- [Elen Kalda](https://github.com/ekalda): @ekalda - ethos-u, arm +- [Marisa Kirisame](https://github.com/MarisaKirisame): @MarisaKirisame - relay +- [Tristan Konolige](https://github.com/tkonolige): @tkonolige - profiling, relay, tir, runtime +- [Ruihang Lai](https://github.com/MasterJH5574) 
(PMC): @MasterJH5574 - tir, tvm-script +- [Wuwei Lin](https://github.com/vinx13) (PMC): @vinx13 - relay, topi, tir, meta_schedule +- [Yizhi Liu](https://github.com/yzhliu) (PMC): @yzhliu - jvm, topi, relay +- [Hao Lu](https://github.com/hlu1): @hlu1 - nnpack, frontends +- [Eric Lunderberg](https://github.com/Lunderberg): @Lunderberg - CI, Vulkan backend +- [Andrew Z. Luo](https://github.com/AndrewZhaoLuo): @AndrewZhaoLuo - amp, relay, frontends +- [Steven Lyubomirsky](https://github.com/slyubomirsky): @slyubomirsky - relay +- [Masahiro Masuda](https://github.com/masahi) (PMC): @masahi - topi, relay +- [Thierry Moreau](https://github.com/tmoreau89) (PMC): @tmoreau89 - vta +- [Kazutaka Morita](https://github.com/kazum): @kazum - frontends, opencl +- [Trevor Morris](https://github.com/trevor-m): @trevor-m - byoc, compiler +- [Leandro Nunes](https://github.com/leandron) (PMC): @leandron - tvmc +- [Lily Orth-Smith](https://github.com/electriclilies): @electriclilies - relay +- [Ashutosh Parkhi](https://github.com/ashutosh-arm): @ashutosh-arm - cmsis-nn +- [Krzysztof Parzyszek](https://github.com/kparzysz-quic) (PMC): @kparzysz-quic - hexagon, llvm +- [Andrew Reusch](https://github.com/areusch): (PMC) @areusch - runtime, microTVM +- [David Riazati](https://github.com/driazati): @driazati - ci, community +- [Jared Roesch](https://github.com/jroesch) (PMC): @jroesch - relay +- [Gustavo Romero](https://github.com/gromero): @gromero - microtvm, tvmc +- [Giuseppe Rossini](https://github.com/giuseros): @giuseros - aot, arm +- [Siju Samuel](https://github.com/siju-samuel): @siju-samuel - frontends +- [Christopher Sidebottom](https://github.com/Mousius): @Mousius - arm, ethos-u, relay +- [Junru Shao](https://github.com/junrushao) (PMC): @junrushao - relay, compiler +- [Haichen Shen](https://github.com/icemelon) (PMC): @icemelon - relay, topi +- [Chris Sullivan](https://github.com/csullivan): @csullivan - amd backend +- [Siva Rama Krishna Reddy](https://github.com/srkreddy1238): 
@srkreddy1238 - frontends, golang +- [Zhixun Tan](https://github.com/phisiart): @phisiart - opengl, web +- [Andrew Tulloch](https://github.com/ajtulloch): @ajtulloch - topi, compiler, runtime +- [Gavin Uberti](https://github.com/guberti): @guberti - microtvm, arm +- [Luis Vega](https://github.com/vegaluisjose): @vegaluisjose - vta, chisel +- [Leyuan Wang](https://github.com/Laurawly) (PMC): @Laurawly: - topi +- [Yao Wang](https://github.com/kevinthesun): @kevinthesun (PMC): - topi, vision +- [Jian Weng](https://github.com/were): @were: - hybrid script +- [Zhao Wu](https://github.com/FrozenGene): @FrozenGene - runtime, topi, frontends +- [Eddie Yan](https://github.com/eqy) (PMC): @eqy - runtime, autotvm, rpc, topi +- [Zihao Ye](https://github.com/yzh119): @yzh119 - tir +- [Hao Yu](https://github.com/comaniac): @comaniac (PMC) - relay, byoc, auto_scheduler +- [Shuai Yuan](https://github.com/ysh329): @ysh329 - ci +- [Qiang Zhang](https://github.com/Johnson9009): @Johnson9009 - relay, tvm-script +- [Lianmin Zheng](https://github.com/merrymercy) (PMC): @merrymercy - autotvm, auto_scheduler, topi, relay +- [Xiyou Zhou](https://github.com/zxybazh): @zxybazh - relay +- [wrongtest](https://github.com/wrongtest-intellif) (PMC): @wrongtest-intellif - tir, tvm-script, arith +- [Anirudh Sundar Subramaniam](https://github.com/quic-sanirudh): @quic-sanirudh + +## Reviewers + +- [Aditya Atluri](https://github.com/adityaatluri): @adityaatluri +- [Matthew Barrett](https://github.com/mbaret): @mbaret +- [Arnaud Bergeron](https://github.com/abergeron): @abergeron +- [Florin Blanaru](https://github.com/gigiblender): @gigiblender +- [Matthew Brookhart](https://github.com/mbrookhart): @mbrookhart +- [Yaxing Cai](https://github.com/cyx-6): @cyx-6 +- [Liangfu Chen](https://github.com/liangfu): @liangfu +- [Tianqi Chen](https://github.com/tqchen): @tqchen +- [Zhi Chen](https://github.com/zhiics): @zhiics +- [Valery Chernov](https://github.com/vvchernov): @vvchernov +- [Neo 
Chien](https://github.com/cchung100m): @cchung100m +- [Christian Convey](https://github.com/cconvey/): @cconvey +- [Meghan Cowan](https://github.com/cowanmeg): @cowanmeg +- [Balint Cristian](https://github.com/cbalint13): @cbalint13 +- [Egor Churaev](https://github.com/echuraev): @echuraev +- [Xiaoqiang Dan](https://github.com/xqdan): @xqdan +- [Yixin Dong](https://github.com/Ubospica) @Ubospica +- [Haozheng Fan](https://github.com/hzfan): @hzfan +- [Siyuan Feng](https://github.com/Hzfengsy): @Hzfengsy +- [Josh Fromm](https://github.com/jwfromm): @jwfromm +- [Alexey Gladyshev](https://github.com/KJlaccHoeUM9l): @KJlaccHoeUM9l +- [Sergei Grechanik](https://github.com/sgrechanik-h): @sgrechanik-h +- [Altan Haan](https://github.com/altanh): @altanh +- [Mehrdad Hessar](https://github.com/mehrdadh): @mehrdadh +- [Bohan Hou](https://github.com/spectrometerHBH): @spectrometerHBH +- [Yuwei Hu](https://github.com/Huyuwei): @Huyuwei +- [Luke Hutton](https://github.com/lhutton1): @lhutton1 +- [Nick Hynes](https://github.com/nhynes): @nhynes +- [Animesh Jain](https://github.com/anijain2305): @anijain2305 +- [Chenfan Jia](https://github.com/jcf94): @jcf94 +- [Hua Jiang](https://github.com/huajsj): @huajsj +- [Ziheng Jiang](https://github.com/ZihengJiang): @ZihengJiang +- [Hongyi Jin](https://github.com/jinhongyii): @jinhongyii +- [Manupa Karunaratne](https://github.com/manupak): @manupak +- [Elen Kalda](https://github.com/ekalda): @ekalda +- [Marisa Kirisame](https://github.com/MarisaKirisame): @MarisaKirisame +- [Michael J. 
Klaiber](https://github.com/MichaelJKlaiber/) @MichaelJKlaiber +- [Noah Kontur](https://github.com/konturn/) @konturn +- [Tristan Konolige](https://github.com/tkonolige): @tkonolige +- [Mohamad Katanbaf](https://github.com/mkatanbaf): @mkatanbaf +- [Denise Kutnick](https://github.com/denise-k): @denise-k +- [Ruihang Lai](https://github.com/MasterJH5574): @MasterJH5574 +- [Nicola Lancellotti](https://github.com/nicolalancellotti): @NicolaLancellotti +- [Wuwei Lin](https://github.com/vinx13): @vinx13 +- [Andrew Liu](https://github.com/hypercubestart): @hypercubestart +- [Henry Liu](https://github.com/optima2005): @optima2005 +- [Xin Liu](https://github.com/Meteorix): @Meteorix +- [Yizhi Liu](https://github.com/yzhliu) : @yzhliu +- [Hao Lu](https://github.com/hlu1): @hlu1 +- [Eric Lunderberg](https://github.com/Lunderberg): @Lunderberg +- [Andrew Z. Luo](https://github.com/AndrewZhaoLuo): @AndrewZhaoLuo +- [Steven Lyubomirsky](https://github.com/slyubomirsky): @slyubomirsky +- [Alan MacDonald](https://github.com/alanmacd): @alanmacd +- [Masahiro Masuda](https://github.com/masahi): @masahi +- [Andrey Malyshev](https://github.com/elvin-n): @elvin-n +- [Sergey Mironov](https://github.com/grwlf): @grwlf +- [Thierry Moreau](https://github.com/tmoreau89): @tmoreau89 +- [Kazutaka Morita](https://github.com/kazum): @kazum +- [Trevor Morris](https://github.com/trevor-m): @trevor-m +- [Tatsuya Nishiyama](https://github.com/nishi-t): @nishi-t +- [Leandro Nunes](https://github.com/leandron): @leandron +- [Jiawei Liu](https://github.com/ganler): @ganler +- [Lily Orth-Smith](https://github.com/electriclilies): @electriclilies +- [Wei Pan](https://github.com/wpan11nv): @wpan11nv +- [Michalis Papadimitriou](https://github.com/mikepapadim): @mikepapadim +- [Krzysztof Parzyszek](https://github.com/kparzysz-quic): @kparzysz-quic +- [Sunghyun Park](https://github.com/sunggg): @sunggg +- [Ashutosh Parkhi](https://github.com/ashutosh-arm): @ashutosh-arm +- [Alexander 
Peskov](https://github.com/apeskov): @apeskov +- [Pariksheet Pinjari](https://github.com/PariksheetPinjari909): @PariksheetPinjari909 +- [Josh Pollock](https://github.com/joshpoll): @joshpoll +- [Ramana Radhakrishnan](https://github.com/u99127): @u99127 +- [Andrew Reusch](https://github.com/areusch): @areusch +- [David Riazati](https://github.com/driazati): @driazati +- [Jared Roesch](https://github.com/jroesch): @jroesch +- [Gustavo Romero](https://github.com/gromero): @gromero +- [Giuseppe Rossini](https://github.com/giuseros): @giuseros +- [Siju Samuel](https://github.com/siju-samuel): @siju-samuel +- [Janet Schneider](https://github.com/janetsc): @janetsc +- [Junru Shao](https://github.com/junrushao): @junrushao +- [Haichen Shen](https://github.com/icemelon): @icemelon +- [Qingchao Shen](https://github.com/jikechao): @jikechao +- [Xingjian Shi](https://github.com/sxjscience): @sxjscience +- [Yuanjing Shi](https://github.com/shingjan): @shingjan +- [Mark Shields](https://github.com/mbs-octoml): @mbs-octoml +- [Christopher Sidebottom](https://github.com/mousius): @mousius +- [Siva Rama Krishna Reddy](https://github.com/srkreddy1238): @srkreddy1238 +- [Dmitriy Smirnov](https://github.com/d-smirnov): @d-smirnov +- [Jon Soifer](https://github.com/soiferj): @soiferj +- [Adam Straw](https://github.com/adstraw): @adstraw +- [Chris Sullivan](https://github.com/csullivan): @csullivan +- [Anirudh Sundar Subramaniam](https://github.com/quic-sanirudh): @quic-sanirudh +- [Zhixun Tan](https://github.com/phisiart): @phisiart +- [Andrew Tulloch](https://github.com/ajtulloch): @ajtulloch +- [Jorn Tuyls](https://github.com/jtuyls): @jtuyls +- [Gavin Uberti](https://github.com/guberti): @guberti +- [Luis Vega](https://github.com/vegaluisjose): @vegaluisjose +- [Jyotsna Verma](https://github.com/jverma-quic): @jverma-quic +- [Thomas Viehmann](https://github.com/t-vi): @t-vi +- [An Wang](https://github.com/anwang2009): @anwang2009 +- [Yao Wang](https://github.com/kevinthesun): 
@kevinthesun +- [Yuchen Wang](https://github.com/wyc-ruiker): @wyc-ruiker +- [Leyuan Wang](https://github.com/Laurawly): @Laurawly +- [Alex Weaver](https://github.com/alex-weaver): @alex-weaver +- [Logan Weber](https://github.com/weberlo): @weberlo +- [Matt Welsh](https://github.com/mdw-octoml): @mdw-octoml +- [Cheng Wen](https://github.com/chengven027-intellif): @chengven027-intellif +- [Jian Weng](https://github.com/were): @were +- [wrongtest](https://github.com/wrongtest-intellif): @wrongtest-intellif +- [Yong Wu](https://github.com/yongwww): @yongwww +- [Zhao Wu](https://github.com/FrozenGene): @FrozenGene +- [Bing Xu](https://github.com/antinucleon): @antinucleon +- [Eddie Yan](https://github.com/eqy): @eqy +- [Aleksei Yazev](https://github.com/Aleksei-grovety): @Aleksei-grovety +- [Zihao Ye](https://github.com/yzh119): @yzh119 +- [Hao Yu](https://github.com/comaniac): @comaniac +- [Shuai Yuan](https://github.com/ysh329): @ysh329 +- [Joshua Z. Zhang](https://github.com/zhreshold): @zhreshold +- [Lianmin Zheng](https://github.com/merrymercy): @merrymercy +- [Min Chen](https://github.com/multiverstack-intellif): @multiverstack-intellif +- [Xiyou Zhou](https://github.com/zxybazh): @zxybazh +- [@blackkker](https://github.com/blackkker): @blackkker +- [Jiajun Jiang](https://github.com/jiangjiajun): @jiangjiajun +- [Qiang Zhang](https://github.com/Johnson9009): @Johnson9009 + +## List of Contributors +- [Full List of Contributors](https://github.com/apache/tvm/graphs/contributors) + +## Mentors + +TVM is now a top-level Apache project. During our Incubator phase, we were fortunate to have the following mentors. 
+ +- Markus Weimer @markusweimer +- Sebastian Schelter @sscdotopen +- Byung-Gon Chun @bgchun +- Henry Saputra @hsaputra +- Timothy Chen @tnachen +- Furkan KAMACI @kamaci diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/KEYS b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/KEYS new file mode 100644 index 0000000000000000000000000000000000000000..c5297eb911c9b7beaede1c2800bd4442a9c405c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/KEYS @@ -0,0 +1,762 @@ +This file contains the PGP keys of various developers. +Please don't use them for email unless you have to. Their main +purpose is code signing. + +Examples of importing this file in your keystore: + gpg --import KEYS.txt + (need pgp and other examples here) + +Examples of adding your key to this file: + pgp -kxa and append it to this file. + (pgpk -ll && pgpk -xa ) >> this file. + (gpg --list-sigs + && gpg --armor --export ) >> this file. + +----------------------------------------------------------------------------------- +pub rsa4096 2019-11-15 [SC] + EF52D68AD5276994249816836754EA97C55E3DEB +uid [ultimate] Tianqi Chen (CODE SIGNING KEY) +sig 3 6754EA97C55E3DEB 2019-11-15 Tianqi Chen (CODE SIGNING KEY) +sub rsa4096 2019-11-15 [E] +sig 6754EA97C55E3DEB 2019-11-15 Tianqi Chen (CODE SIGNING KEY) + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF3OK24BEADD4hxjrsgb4jIDIACHS15X+5YP/YaUF5UDDQs/bNn/xGJGVl4/ +4sJ6qKZcvMDrWTmnNItYBuaHi1qhGvlcASBekm/9PU2U8lZmAF1lZkKIIYZkX+If +s8PEYurE8cDr65orrdsFF8Zwb+u6x+gMsHNivsU2Kn3xbQjGmeW44UA+aaXzcJp6 +sVk3aX5DypoYJNBmbASyOjZVWkcrJ+NKEfJ1dKtka5/siqOjuvCd8NT5dJVhZbm3 +Sf8iclEMqog1LhdI/FhE2fB3C5hJkzcinq2v55qDaGqsL+qgT7agf9b4t0EgjbVh +cs6jlCglad+Oz27BQIjt06HE1OB5T/Gxa080FK4JZMpxZJ5tDA2/7DQM2MyN84z/ +s62JuBJnsrzr4w8D/QcAyzAmyzAqvxLR/aqLgJTIcQiw6AenHovKkNbEQOBYE2T5 +ms7uVO2E2Tv42J4Te4OKhpId9mK+7elCLvOb2DfAJDdYxDN9c8dJTls+G6xmv0h9 +bb2+QRjkpDiFeu1hKNEe0/ST/YXDfRYpKl+1t/QZ+JccLgEdEwuo/IQ1e4POH2h0 
+Zqvy7TR5obeTf0TvmLzW+i3s1oUkmSAnQEncSGnGnlugYk0BLuMMi9Fhx6qcC5pC +cA3nsRqFKebtnpop+m+psFkmd//xKSXJt9IYVEbQVNiUKm9uYq6RxZEAmQARAQAB +tDJUaWFucWkgQ2hlbiAoQ09ERSBTSUdOSU5HIEtFWSkgPHRxY2hlbkBhcGFjaGUu +b3JnPokCTgQTAQgAOBYhBO9S1orVJ2mUJJgWg2dU6pfFXj3rBQJdzituAhsDBQsJ +CAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEGdU6pfFXj3rVJIQALBArXEaFDdTw8wl +65nPLU6+QPc6eMn7mz6BDp1V7xL6Lq1GbArLpmQHIFhfQ/5Qmg80wuFBU1CNSRHd +tdZq3v8tB9Txvhy6bLQ+IijWH/TxSEPqnrkNsWBQLqAygDC5O3Ook/T6B5kuc176 +Kz+w+YhzPS5hoPfJK6xGoKDNlkhmI/EnUjAq459VNpXeoeemiydzvApiCHH0VfOj +XnmgAJsAJA21EfT5Wuh/WODsf0HkaXB0xoWZfE/ugIQBLhZi9nUTYgwU2r4a+v4A +4C2T1OyJ3mDU+Oi/z6d0WJvsIrLCFcF4Q7b/6+MGkgLDGlsEKK2LZMrulGzQ1QY/ +O4ck3dVDseqT2urplrTamDIh1IQmOt1FqMFwugdjfQwJ5HQeX6IeUGZei2Av/IZR +8Vw5Wxtm1Aksz3Js6iP3QmAh7txDUKO+eT5zLSXBoPmkleLnvCdtlvwaSNCAudHw +12h10IV286OetJvyyjmh/q/30sKNGiuucLMzPMwtLNW/j3cts3fqRHIHxepT6m94 +FoYIlwVu4afiGgSi/7cN4p9GgfwnFGeETd25pgNG0KdXbVWniO1dTEKzOtvtuPYK +Y88ZAfdOgj4dyeI9ZnJV8RaZvpImDPVHGQm69/071jBxyWZnVi/YtOm+DjHfw0Vi +uiUdzoIb54oWW8tbiNg/nfiLUaJBuQINBF3OK24BEAC9W8Cwubu4Dpr4m0IIrLF5 +zRRqQm9QIcEC0QHf6w1c2NWQTJP+MQY/jZLjtKw5yCQDghT+qsil2p8xCM0EqRd6 +6NqxsAoweTCoV0MwolQv5T3KuP54SlNWjO+6gT73LkKuOHoIyy5cS9pIITlExHy+ +XHtfQi1keDpWUEyvSRG9slu1DcxAeo6nFEpCuoQ+xx/lrCMxDlyZJCDhj2fXs2hK +8oKLV5NbIuifbXbCiOvZUdBHk0yLCEc6wNsVR30yLijSiPCKsAPcsG0PjQnz3eTb +0czq+6g50zUVOTioUghIlZ1DhCsxQGnlxoLY71pnmc7qVszdXPV2Mp7/KSIhDJFQ +LN0enDVz9aRXfpEK3SifxaPVNd61O/BGziza+XCK5qpEQL95UM2NdQCWixYmIOJE +k95tpnagtNupMkrY6WEa0CjVBzF1kdr5WpeUd6w85rA/opcqpQ8yLmvpyJ4tXZhN +7oAWZSUzyB904FMswUEhaS7pEJIlACeFcPwm31Jv/637gw1CopZpDxDUaW5/boG5 +9Gp9D/GV2gyMrHAcwA1gZSbmolv5ZYcnUmwTPijVNZ+o70HBbvbNZqziPgy9G+L/ +oGBkY/fpg7qfaGtAbOUbx1ck04CbafSUQIxpCG8in6zwrIRnn4uj6q4wIZ8SnvQ0 +h3Ug0DmdsxvB/xdfillH/QARAQABiQI2BBgBCAAgFiEE71LWitUnaZQkmBaDZ1Tq +l8VePesFAl3OK24CGwwACgkQZ1Tql8VePeuZ1Q//csRsGDKNrW5e0EitEcfPZ0PC +teEw7A16dniXiCQF39KxxLzjCjUq7U8iWNm7bn1zdXcSVYZow+i5hFWXgZLKTKep +tQoocJmQ7kPV5oiTBewFy9T4BICUekj/EhXhSz1wxb3GSc+uHL2IUlFkixTY4k4B 
+9zq49gkNkTM02Or3quu1ZWAgeol1BSyV0tcI1h3M0OXtrN6idLyzQJFRyMYtzfwp +Pd2+hdaKAl8mKANs/GMJni3QvyVXzuJxMP6SNOFx4mWj0UVFVZvosv1lLXDesvwY +sNZmz5IkfuU4DHz1ZzZc3sThkpBdBiadvyKtNsenNh5nEXtwVhpiFf3IdZAvG7Ks +7i3Fx1/ObbvxMCWeFoB6oP/swHr9i6dqntiJoB6Gl5y1ye3qte8PiNuwRVhz+YOK +58Ga3wWMvODpi2AgSFv7cd1OFXXsoonORfmpcfAp+h6dIr/ttQMP2929/NoX3Cs4 +/pXoG9L5EOpMfj0Q24sAGW8VzuCAHL3e7QSijFuSHZxz9oe4C28/mAY+KP0dif0Q +O3rq4kpqlhseyzcRyE1LWBvzuCeSTui2OPmyivFY57TOPnMHm5sXVby1VUiwm0B0 +RgBtZDRLv765lAFGtp43sccZ7zfRaKhkVmzh3bAZ62nJyQNGw0TWg96Pf7Kjb0Bv +ha8fS9ysWDy/Ye65MP4= +=MSiP +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2018-02-07 [SC] + F42C1A6E634C105E8D985105CA751254E97B9FE4 +uid [ultimate] Yizhi Liu +sig 3 CA751254E97B9FE4 2018-02-07 Yizhi Liu +sub rsa4096 2018-02-07 [E] +sig CA751254E97B9FE4 2018-02-07 Yizhi Liu + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFboMe8BCADLHoUiTXPKpDQXXkJ8VWi2iekYYReMrkKgBaQv5nlYww2va1fV +9VcVGAlrlZ2XFXa1xVF/LwnfPXtg0xD+lL/6FQJlL+esdosDP2E4Gu1AQMyuOS3J +1Dy/iMidI9XJFhoPikDkXBjcMQiVBABP6Goc6da3oFkDf4dnBUGbUI6SOzNsiJxq +IQ/JkICZ/8HfbJk3nIiAVfyuqCrHxWqL8JWM/SwWcjaZDRFFgCp5SAQbv9+xqEpE +GYcJ0bP/PFgyyLBlWS1vsnrlxVZf0fsnNrYHDcYA7Q7iiVjaHR426zea4nUm/72+ +9cF8QpE+JVFf/uhf1SpJ9cnvgSa9lKU+9EjPABEBAAG0H1lpemhpIExpdSA8amF2 +ZWxpbmpzQGdtYWlsLmNvbT6JATgEEwECACIFAlboMe8CGwMGCwkIBwMCBhUIAgkK +CwQWAgMBAh4BAheAAAoJEIlV2E4gyasMy20H/3508Rr+JvptnVMQQ0OzOhMiYdp3 +Pv3M3ES2lFa22AJKLg3snSIJj1ros7ZyWEZWdOSQocWF+Q1+xX54pEeRfcHuDHfv +QZPeYzVspteHS3rZZ51o4f7GyLbMY1sUEqJAzMne965Z029HXSUOjfcD/4gTOi0y +fnlb+HVcAtGA+Q+kU2R8FG43F5+BWfo0dGLev9cEAcW18sz6u/rmkc1ZHPsC0LG8 ++ZR3SEbMznXMe6RGbVurHOw/LpQsf5K+u7J3lb9fk0kXmp+NzZxybPU9XEmC6eH4 +kj3fJCbycBdscW0lDVk0eWL2z4TAQdI5Er88P1RnwCM6eQo0ljjj2pNqnW65AQ0E +Vugx7wEIAMFm7Im7iOL7rEYmJKcNvANSeL08AE12pHt5g2+8pd10m9oiyyAYAU/i +Ogar7mmNXGBxXy9gsgxUwGuFixv/nj9ts8+GNGi7pLGfYWdsD4P9LYtzZR7VHpOZ ++taIjqsgTvNtGCuqdX6uWCMJKgQ3kNl9cuzFQeQkAcCjrc4mJky69OcKCJvgJ+bd +76TgxKeeX47jz0Rpi+0qf3CFxc8Ey306BXraf4RThsE6ySBWvZxgYBgdTKOqHyNI 
+3DRnIHOXwXMu+k3yCPpUUJBfkSLQ4WP1fllSJvO/MzVSZTbdXA9jZFKv/q2cyMMW +gm/Z4dxGv1M3dYcJZTNkW0hxWEYn900AEQEAAYkBHwQYAQIACQUCVugx7wIbDAAK +CRCJVdhOIMmrDJq4B/4+85Dqx+sPfA59duYx4EI4zAn/c/EIBi1iNtu1KxFdDOR2 +JZSaM4JCAdCoe4Y8W0euwZa3UeO0F+4fqXOjiFri2+l+JLdtdeAwApMSDNmjqHez +WrLF0QEZ+f2cErTQ3ZhAhxNO31RWN4okTMaZYgqZkMhM/C732AFAdk3Ryc4Phz1z +T60414x6iwNOy3d7QvnsFZqH3yViYkNf09cn+onWsUhM8AM9dWG/uA1cF7u5UMAn +L7jeE3ner9eTyJ+FfzjLS7N2XyfGlEdzaQ6b9vH4WfBBixSLbNWEaQEEFNqyBMB1 +HBUJtd/3SWWQZY0MMpGUIRaTGsrXCHrr5aqkUHUEmQENBFnwWJ0BCADbZIs3nd+L +MWCYQK1badVHpzNBRYlbKl6TDsrxUje32/d5FhJ0Sq2yP0hSm5ggfVqt9Pl9Pz+M +tv8cY/gC0Gkod22iNEvn/V3Y/+tXepsuiXD+cyGWuAc4kbN6kHBmCHKpRHpbO2NO +AFm5cKJEVu9yiBBxz/bA8HeD/HjmIPnZfTBADxl6qu+s0YTaDT+N9c1M4JDDB6zJ ++iYCrv23R1SvoBssSaEZbQD5+bW4efPslOwjAysG8r7CDrJA8aXSJfudMKu5Zxku +TMzVkWGWj0VnEHRkHVqagZvXHc6gkprlKh+XEaj1+qsa3anORbEEaeeBLCEvyLW2 +j7mYRJ//cWyLABEBAAGJAR8EIAECAAkFAlp6jHkCHQMACgkQOnCfbyuG6dYCoggA +hTCfDHYSdAVUx/H+45Gbnurdi/PDmpKIom2HaGF2ur1+uR/rCumPCXoV2EuQNntD +CxvXsWDhTEf/coOAfJ3WiF0b4AzDOuHa3+YOGHBD6/rPMDtFkorc8EQhoqrOvOn9 +8bJBetXVqmZjX3Mwg4aii9Xn4b3G3D4YaMXHiXuH+lOa5YU0oMze3hzMCh22ZMl/ +IQqZY9B2cc1q8gXjtK4JN5k8etD4U+uOBZVlWyDrbD2jMC4GCXeiAVESsh99Aes2 +yc8B/jhDfLF4LvYW72AHkNlLzmdEs1w3x1CHMP7k2sS+QznTTtmNZ3wonaDj1fdu +ne+Wf1vUtLKs2DLT8Av767Q1WWl6aGkgTGl1IChZaXpoaSBMaXUgYXQgQXBhY2hl +KSA8bGl1eWl6aGlAYXBhY2hlLm9yZz6JATgEEwECACIFAlnwWJ0CGwMGCwkIBwMC +BhUIAgkKCwQWAgMBAh4BAheAAAoJEDpwn28rhunWdTUH/0j9HOIKyj368ImHr7WL +CRKi6E9OwN24JP/sD2PMxq5DK7vlGRumQ7qvvDU3DuR13EmzEBpTY6IQagShvH8E +EaV9r5cXmKnISAqGcL+Xe9HyJm4ANMLsTjhX9WkDFYluVfujVHSWq8jz9KDGB6Qw +dpmHWfHNVZGKK/TmINwL7K+HKcSntH35tL+NrnbbpMU/XnICdXd8ZvVhVGFpCJy/ +4Deq2zu7qeVaAmRLdwo4wr5EZqvWDgr4iIYHTsKsM2qQbfe7zArXi1oyO0vyncER +5sAmaP1iSEPp8zaTslDEDfd3Vv1c0Kegyss//l4O11mzXJkFPLYD+EnaXsmkzLjw +udS5AQ0EWfBYnQEIAJ9YYPa3T7ENm+lOyJDW0Z9MCWs/TfH+MNx7CGIf7UiPtCgY +AMBz3wDeQIfMkU//21U6he84FGiEk8CRvxXgsQ5jTNCt5BYD2g3/FsriEG1QiLLJ +sURsfacMc5YkWojbSELF4Eqs6yFt52rb9I7cpWA0/HIBpW1IzD11BB2BS0qPvx7d 
++emKwbO+ZR/xyb+xKVcd8NMR/gIKBiMZY2fxqyOK235aRd6Qnfp2XnbgKFdiZN9j +A+xo0hPkGeUAhRKZfzdVP/kVzZZTiASUilRNen9RmfEvQiMQlgNrhHwrHKKifz2E +BR85HyLVraEDLQq6hPVBYZlf1h5TX4SBL9X8AsUAEQEAAYkBHwQYAQIACQUCWfBY +nQIbDAAKCRA6cJ9vK4bp1nO7B/9VfccO6yvoT7oYFMuXsJK2NTUoBID6PDFQjDLX +ml54xoNwJmBw4eU7WWKoha9GPha0VCce/9Rlj9vUQxoMA9Jt0oj3Vu3OwQiGdkay +I0cKhYVAMDtaCdCoha7iW8pd6C/zGKSBvSKwdxxe+mD8+jE4+LWRTDvuUhQBfBH6 +uEJNus48gdYIDfPqujcn1coGeLzc4TeKUOd6qKhbY5rGL7JTxaNJ7O64ffNaplUP +w67vt0J2iNdRJglaFippQq29dYTfdpddnlrZnMYjD1M3FmnpCEOWQQKT3Kz/KYGd +iy2XFfGsSWm9s/+3VyEC5Y85iqfhYoWkISuWS6uOpUwf2F1zmQINBFp6j2ABEADO +07fnGhxTkPfmRsJS65Cif6ywUVRl2ZXKi/N7DjKJdl+Ej5lGOaw5cExaP0RD5iT5 +ZCAzfUS7UFULybEcbqgnm/RzaCrz9mx3gLa8Jx9XncagwJQU9GbvJxzlX8itgY9v +ezK1q7Ec/iwCA66suzLeY8cA68EvWwmjR/1WlE9W/gov9mSlCu7QRIP9DuUHyL9Z +ZtYTwYTKsSaRCTv42xvkxAQ/ifinYZn31uQmW41Gqt2YFNWlfp1uA97dmyAKcIeO +CkvpQyChspJLIcA5lQrH6RV+oyuhRoSw/ZXPwdRXS1+79arCe1vsMUeZPzkzSXcr +pkzROOzVx1WlXR8WYcWtaXkPsgn7Icym0ngnwRbuY0JACT2F8MWgBlC3LQj0mrhC +cr26v9ettcmeulmuY/WLIi9oDtgq2yHSbz2na+qbPRd5vDS1i+nD62xvejZXC7xI +naoyB0f6QYpgXQKyEFO/uCUGFXCBwYAPe4XwkNozR1GmOTKP9ZnriJax/BIPva5J +iqK5pkqOxuiGuPNdW7Bj/HvQr7F1s5LoG+Q8YSVo/KEJ2oo3IwU6FWZwq+KY/bVT +O3fRCBD7Fgu8Eu9zw8ANIvpuq+BDo3yoUCoS83Ok+favH0K/jwBVFyu+/rnJc4wn +7Px9/zdniaSTuxK6pAyTiUtVy6Gp73Roik5Dhu3nqwARAQABtB9ZaXpoaSBMaXUg +PGxpdXlpemhpQGFwYWNoZS5vcmc+iQJOBBMBCAA4FiEE9CwabmNMEF6NmFEFynUS +VOl7n+QFAlp6j2ACGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQynUSVOl7 +n+TMiA/+LB2vDz8ZzMRTwlnWxxhiKU8+P5QEvC7sgwg6REiqjEfo+Abcf/erRzMS +nX0G4G6xauty4NDtieUI3X/mDKUS96yqo8Ij5NO02ltI0isG6edlyyjrs01yiGHN +KjTDkU1f1Af+wW8/h9By6cf2x4u9VWfSUjzwkcrr1qorP0AU1cSXVDJNxnKKHbds +BlVC7UkUX1ZMBQq3inFIox5y1cSL34joUGRcyFtqZDoTvYMIZgAiJJw1JmpQU2bt +e3T1/70j6za81/09ev/kN9HIfeK2Mh0IVTttvBdggmQZHhKq5tL70v93RUoCaRmJ +CvyUTaSe1o57phzOeUj8FmFhvqugnrtfYaygdvjrOZYXo5R18jXiQG0lNQPuxh9V +r6dca85aP12yB6kK8/d+09PaEtirqwW32YcoNeiHtPWvEIastcO+bAE6OKFWHE+3 +mYKr2m4hAH6CWDOa+x6p9JyciTKxEgaaXcj/q458r2S79iMeJknzLKw9zLPjHAm0 
+tb45x893xnjNSSDd8DhjwwwZKCt/pZs2E0pyp08DF1a8uCIdoQ0s4eo5Yr7tJxGp +AWgd/VcrlHBmmGdqdMMUhS02BjuyVDXc+T3fbE1a5QIpHoqjl7lyeY+VLnOUt9Y+ +RyWKsDONsB3QcuMRaQQWGf7eeILMIZ+Y33qpt0/55qLbzsEY/265Ag0EWnqPYAEQ +AMLE3QGCRBZU4nGKyOIpIsWpolG8f5vnAZJwsC6g4ya3odsHuUknDo7Puhp7RCIx +HuEtSBTf+20nFifX7GCgHAKn/mGWDk9mNWmsGpVzXcHNO0TKTod6V9FE5SC3CVgg +K8U1PesXh0PoV2AMWq1AmzWJyivHFRefuPilu+NVRE/Mj6ZWbs3ApixMml/0S1Y7 +L5btNjG1DCZbs6i70nSuUXXXM/D0jkCYljYf8wtruzj1MN97NZP2nvGjyBkGw9tN +xyWYirZ5jJOlzbee4rags9agxETrZ4z9S3QAFcQaKNI32HyuSJELgIcx5U/uB2f1 +9GQX/33kk26OrTAW6INUCRK6ji2y0F8IxfrHd0WXj/RFrV/okQyEai5x8oC1+Rik +62CEnI9EfL/WU/toHtSeFBfNrtTKa3WiXnQDfHmJBe1wfvOmM3QjH2ApPBwUXXbl +m7wBCPEjQJs+B0FIrlpJdN+KaGMMSHsz90f9QMF6GH/pgDPG7K1IBsP3ZqDzJi7C +LnLTAf0FreLuKLix349Y4X603uNd6Fx6vK3BGWB3ZyH7D1vCMBBytDdb66nmQQ3Q +ZjJBU8FCGuBwd8q32bVKbIOQTQiMUbUGe3xZozC82mB3glEUCO46OElD0j56GC3X +GVK4utPexIX9hcQ+uSXStrwhgHd76/iFCsb1F9wR16EVABEBAAGJAjYEGAEIACAW +IQT0LBpuY0wQXo2YUQXKdRJU6Xuf5AUCWnqPYAIbDAAKCRDKdRJU6Xuf5KqtEADH +xHPTbl1lT/QZZ+Y+SSuDpPF4uMjUP1TPyt6LGK9O/C0raIxabpCtuit9VPwcubH/ +krVQxqIkje1rI6kjl/+krrwnnNhjUozoQh4y0e90atgu9phoQGjb12vhl5P95OB/ +YX8ZRJ2Bt7aSTfZiUUbL0OwwgontgLFNyz9/FNp/9eSrxOcoMazkt5D6SrW0IBW9 +l5SZeNDc9yYw0CMg/5YZ5Rv++APgXHWc/WjuDMHje7hi2VFM12VXF+gWQZy842n5 +IQzRPx7Pav32iByN00qKLNUUIwgoEQwZMStC9xjooGSmqOVUWnMYBLiUgNTySgOh +u73hZVo8VNpOseatlaIRGC2ukn8AF5TlXMKf7O9L24x6bp3Bd7M5KUNCUDgwn0mj +VjsGEcT41Rc9XtglB7aLTiKhE/LqGi1f+BQolr6nGLEQ+oVub3bqratmjAE7Pw7B +yzup78JPVMt8vNdjwGYg3yHW4atLS1qUQ9VNYo2l4b+DxcCvFxV/mAfa+07j1Z9E +p4/Pw35uanSfOo0ylGmHp/h9yh27vrF1EzwshB7DlJoo5KfnIxR3jVTKye+UerEt +N8yATW8CRIKO3IobUfLMDdPCLO7uzoW95cI35Y0l8JgK2NeU6tVZptP5mDogeAbq +8PlimrXuzG9Bokct2SOO6Z51i6rSDo/ALj440EvWNw== +=1xfH +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2020-09-24 [SC] + 6A0D4938D8C052C759AE2460ED03B26E4FC3509F +uid [ultimate] Ziheng Jiang +sig 3 ED03B26E4FC3509F 2020-09-24 Ziheng Jiang +sub rsa4096 2020-09-24 [E] +sig ED03B26E4FC3509F 2020-09-24 Ziheng Jiang + +-----BEGIN PGP PUBLIC KEY 
BLOCK----- + +mQINBF9tEiUBEAC90om00alNSupM78ZZYMdwKZnJLIhAD22YARntVNVBuD9Znpuo +BAYwjrWdAi/npwN+r+Pd7Oz6fMBCmB3e4tsrPnBzauGb6aKgjBMHcVEx0p1197kk +WcGuKt4FNlHYfmc2sOOQre2GcIVOU1XuK8tAhgca78aorAlMtOqq+/ASnKcjRSjW +0AOzlEKfaGVgst2UO7Fc/w59S3/qv1vBGKnlqLvsJU7kNR1gFotqsGAee5Vu7alQ +WiHFJbW9ujLTPu7m8enFVuBGFkPsW89Yl/0mXnAKZFFNCHIQ9gkT+1bvZhx8ViJL +4UeqG7wnSLSSIQz2UPBJYV5stxNtd9HS08Tfviv37shd1SSprFLoQDk87j7wF60b +AR5IjbVgdprpmVNncO5pnyZwXXWVi7ZyiMSaW6wg+lkeQMGflxgL+05xOafJYgO6 +UepXqu1mc7Q4eVUyft/EPmdyvlg7Fo4T4Db2PnstonkZCyLogdaaJRuxCc0AR/O1 +oNaodrdjqydXVnP3d/gJ5gj78zeMPVbGbzwhpIwhfDouxftaU5zc6prBsMgY/os6 +XMe9bNZWpOLXZrmo/ovaiebmxT5ZYuFRGdeRl1/Y5CWE6Q9JM8euwKuskNQ4G0aY +fVQ61Cxg4hmrnsv9YFAjf9PPWhpFHvILQoGSs3HbJCLFPphKf37gzfiZMQARAQAB +tCBaaWhlbmcgSmlhbmcgPHppaGVuZ0BhcGFjaGUub3JnPokCTgQTAQgAOBYhBGoN +STjYwFLHWa4kYO0Dsm5Pw1CfBQJfbRIlAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4B +AheAAAoJEO0Dsm5Pw1CfBl0P/R04MxtqC4aI0fpdwmed55kGunL1W65phBgcOrDL +58cv5dKJzUmfSUXw3QANcFSn9Q9Z2clj+a2aiGKV5cUiWN0Ny7y6wd3aVOXlRHHy +f30aDO5Ug5RDYbcChTpen+kq9qDXSr/NxXYLWvhobMeXfiA9Priv49fFWEr17Kai +NOuoix/eWA5WpnPMf/Rz4HibKcX/izXTW0NOH54jn+4P9M4ZwWbn0AXKoq2i3zF2 +vZavCStcscrfs+kihtEVvwUkyrmSIblIUdkPNxeo/jx7N9Fbu2zXbhl5JiBmBUMJ +XyFUOBSUDUzA5EvWwXp0yatULOoCH/LIyt+lLdkyfDjsKmAavGf9CcFHVyDIG95N +34/jECPwwVVkbauE0XYOwenOh+Be1goOA6nidB4QT/rGns7zvCNG8+3ttwA4aiE5 +3GrVWXiPaEMaoM56Phscek30GoLjB2gjvwgwa9oGTDYTu8Z4ifLk8qq8ij9uEG7V +cKns+1C3ZvfdKi8SmOzj/v9krOi8N4YW03YS2Oq/cGPD/SttoMTOCPxi0PLR7uqy +YXugsebxlJlXBNTeZx+iiKmkrsILjEd8pUChw79crtH2SGOPqIv1BsqObstLV04r +iiywruqLRIGlsr8BtepCeEfzW9nJRw7W2571t7oD7QbkdCJ4WUyhMJH73+7KFEE2 +fKL5uQINBF9tEiUBEACmMcP8/zm88BmyhDjWV3ZrZ9cn0N3JJfSONt0AcyE5TZ2y +20DnHkp3/lNK6EC0k7twtcce/cnKDbXQ/IpuJZwReq5SgmCoGbBZShjALtVCzQRm +pSA6Wl0JBfw36/IdKUuf8LZtENqp3jgQkkT3TA+/bCh1KQLDYFoVQjUBLiWCDHiL +iBV5L/PH97l93hkxbSDXrBemQRbr+xhA2TzwcmrjnscNCAXkwU9f1Ygh8zDHSJKB +g7Ln+ot6QsPhNQEQWhju5xfAn9+kO8OWSAZF/lJTT2Wy+spDBP1ZnviQadWPj5HL +n4G1qe4QWl08E9FtqVKC7r1YYzT4DlTU2AQ0bJqdvAtojX9ji2Hp4ov8xYPHzy3a 
+ZRdDYNWN6i0mbpzj8SYojyEG5cy2j+nzGOYTEdpwW8pG2aCwRvnO+UqXNM3UyQk3 +9Tyfyzw6m9mlq9zaw/nfvOIA6Ns2QR5+UbplkpwVMqMAzZNyEV2wPe9B195MN6tq +KcznzawD/W1ORccOxrpBXhN3sJSc5n8Uy5pHUHg9B/TdCSLpr7tqqS34gB+AcSUL +NxjdLn72JHKxCp/wpg3Z4bmY5n/bh/D7Ovt7LP1D/MW9wiR3ls/PtNAK4+SV6oqt +G1MNS0QgAitovF8dpmX+/zPKax7baZiJY/sDr9crfRvd6e+HYA3yDo08Z44MTwAR +AQABiQI2BBgBCAAgFiEEag1JONjAUsdZriRg7QOybk/DUJ8FAl9tEiUCGwwACgkQ +7QOybk/DUJ9Txw/+NXL6cKEIm4NQrBc0RmX37sELc5UnvpycV663OiPF9qHE9iML +EUt/LBxrGUarplOA66EIkmmnekUgS8ujjhGOw152nSuZTgoPxX4ub6PI7Hi5lmqI +ZtEpp8VoI+XxAdA5ecN5QNP7P/ovSIZwXvIF00YXqGp6keXi/qdYkylt4s6zLDiL +ocfOZWt994JVIl30gogkw4PmcWx+PKXos+Hq1La7iZUn1pT5kEsN+fHpnh42sAGZ +dhb+puB5tczhVJhL553Z6rh4BABd1DqAZihwelkRRvQUp0Fqgc2oxty5o5pdHZdS +ulomOqGgERHsrtwzqD/n3iep3z22LiitZHsKZ0OoHl9e1YsvdsL5rImEz/FdWwgl +muO2ZjY2KhuovFROCsGVgw3b9gzIjtE8FWE6wSz6qzKihBbI8YPtQqGJgnX2A01m +AkfpGPb8430OghDCQFsrWkuTjmSw42ys1lALbK2yQGRuOCq0dIml1QdE6JfU8ceW +QY1dhH7xpHQxlr9Tcv+enCc4UzCJOnXgkVUnD/u+TqKL9GoSFu6KQrC7jyvfY9t8 +Elf2ReXYVK/jGUePdDFurp+3KFlAHFuen2VZTcNZaUWoYoI84VDEh8/oPEPfzveJ +/GhL5vbglB0H0aG8SVMaTfzr+nXHUVyOSrlYYk34O7bSimVrX6XDGPZpsXE= +=nhJ/ +-----END PGP PUBLIC KEY BLOCK----- +pub 4096R/D75EFD4B 2020-09-24 +uid Zhi Chen +sig 3 D75EFD4B 2020-09-24 Zhi Chen +sub 4096R/285DD7CB 2020-09-24 +sig D75EFD4B 2020-09-24 Zhi Chen + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF9tJNABEADpv6LjNjEkb62cIXKHLcg0vugPgGzui+cR/YyJc0zNMPnQNnvD +D0VisV+vdorYDmXMLRhGC4KbD6vmoFnriBJrdKYpl0geV8Uzw3S3Ecyh4ELK1ayD +aVpyE5A+73LbLTSnnmfonXcWdW03hZe8ilKc8vapvibCMQ7SVvEJ8nHHd/YmAteL +6HNLfTkm/pBZ2MxX0k2Cm0qX4UnERKEWFPGjXWViRaMLWl2ufIocA0KXWRRgiprb +FwMUTUCjWiPE5L6/8OKjKHdIv0gKi3WeMPYpN0PNTclqlZ5YAbxuSOnuRXmw4D8d +bnM/V+Hq4s0VQLMItbdGX1pmiJmHsXRygjgvZV6giusNge0KWcKEUvJaU0wOOyTC +luAgqWo54pMDHEd8561uvSishgG5/ePDNTdouR05J4MOl8cVBFrPwqPvymk/Wztt +qsYXiZhJ1vdMD1X4UUNYg3peDHsIZWdNIQTcEgRSArSahOlM5dyjSLFbHj2gjWES +pP8EqmkTn1/nRUaFvhZCX1r37GYYbHH8iD58omG38eVW4uOIyz3nwl5DxUO2+4it 
+NGigNoDQ7c2OMn/F7P9m3juxhx2hyaR5nTDm48ZaUbMUFbfWqTIhPBSqP9qZr/8g +72zY1AkvPPOfgGChJ7/XGnyzf94C6DtDJQgubjnNsZCHGQyNdPAz3F7U3QARAQAB +tBpaaGkgQ2hlbiA8emhpY0BhcGFjaGUub3JnPokCOAQTAQIAIgUCX20k0AIbAwYL +CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQn77L09de/Uu8Rg//aoDx5Sus1FQ+ +BIctSl7gc/grs6rHYFPi6d/ZH0vXbsa1c5oARVAJd+gnhrG0Lylmf0kbcixriNwT +iVoo32CyA3VnT7vT46moRCe9UQBuEjIYWo1YiAXq5we7stsWqwpCDhR/h7weuhXR +Rr4nIzLnFEBzvLcuqorVL3xTkisnu3u3i8XQsPbLkQQXohP0BWJoBZKN+VEGciWD +26rriDeGw7ew5L4qJe7AKwS9Zt0jQoiEINp/CtINOrNNIhDxaKy6AwW/1wohVOtq +TG7kyU+zgtYV7+nZpXT62lKn3rOsX6OxsEU9IPqrCRMW2WExxUJ1w6iUBUcxwlUz +QbVdCRLMfR8HE/BgFVQLP8ESka2gH/tcFEp+tsv7p47wsrs9NbIaL+yYWQFat5r0 +h5ynjFAFmc89weC5h/bVKv/W0Hrt5YZmWxVjiSPZW13eDQ00PliJTyMlIGxsboQG +3k0+MxH+bpj8VK5cfnbTDg77eiX2XYyGuV4Vs5Yv+y3qiShD/nGJKNrKk6LaF75f +hwzxEv04tBdNVKjYm9IY64pg+3e916vTpVrhkU3+frzQPr5SYKr/v1vYnknADgn5 +TYdtSKoWx6D98JWllNcOhfYw3auiF4f1E47yJ00JFVvNQzlVDaqeGWbRj9DMt590 +BDkAVsr+//2SrmjYvr/Q/b1EVC3nwxC5Ag0EX20k0AEQALaqrORp/Y/GHDgGxYfF +Wu4YNwN4W3JCFKVu7s7yC4T7ndcGrn4MEq0b3kuJeucl8UO9IGEILokQzOHadPDm +2mvUYzzZULgUCRQZTUpU4AhKgbh4eB6LMcxiIRVDaCO/xg2+dFyry7+kk/gpZSy2 +HCot0jvFCDFOxXk6WRCycy/EXuWvZyKMm7Bp+CViX9UoV/S9VkdaxFbSCBWdfmTn +d1QT+GivOLxY8xy21VO5qugnH7nEvcnbniF8zqprpJlgvM8MRW9voFCZg3UOjpDI +cT4foqX+h8FL6BxLKMrYEAx0rdYdn0weaptqkdDc0BAz561q9bcprOmIIEapQ7yE +N9vcUt2i2/Op/CQsKs2dZx13RtHhDCJ2awZ/eiKdMowJKx0dNOdsBVK49TKYSt6G +jQiIKaFB+x4ta6IWDVq8v8zctaDI2ud4OR0kvwP9A4rbuC3k3P+4ojo0OoUO1Ais +g3wSX+bHtOrXQVPuJiSaKp/zJShiSH1k7DCgqPav5SuF+fnuIJ95yLPdDlmxKqDS +JxlCaWgWX6j622zTxJLlm5YgedBi1wKx6BqLdKE1Lvv8As/VwFMxDv9smDwVaMlD +rrwPc3/BT2ES1Pqp1CmgZKVmQ0/4UvvIOe5MeNI5pQ7NBO+0rTN+JkANEVSxTi/u +Ne4tPZk7C/Y5iQvAIYq5BZUJABEBAAGJAh8EGAECAAkFAl9tJNACGwwACgkQn77L +09de/UuLYxAA1T1YT5e5BoxjVm1WvwDmg8Vagwj1E1ZVBWG2H0/uL5ew+7/4Jy4C +iy5DZOdGUbF7hIH0J6Kn/UJGXyRPQh3rMk/hwaColpA6AjrV8LxMjPwgjgUWzBMK +NlhSsGj4UE84tblGL65boDaGeJKYbRU4b9Rw8nx3jQOrSk7xwmLW8qJYyQIgUKPv +hLhq4Ni4pfxDkKOa2GT58Yg8wadN/ZB38q9r2cU33XjyJo9AeWeeqYS4LZvc1mDu 
+DUV3C+RJpNh8+njYSYeO5G8CrQljvUYVWaylL51HPVcRfc+9u+uPfdf4pdcng4Rj +U1LyQQX+JaiukZsX0xXk2CiWSAhWfP3i92baX00SeFkNXxVmUVFjr+WkEPun+EVs +4cnJUHqgH2dkAU3WAChGnI9gJEa63gyN8N4IbTmQnLOnyFt57jmtfMrRGpU/BqFN +b3FNtmtnms9QjqrOr8QxFG78jfNluuEIslP0ul0fbwovItpZxLF3cIPr9M+irKEv +9w0IxAzfZ1IPpl7EltKVK+gN9Wt99Lqkx/kJHw5R/HTUuM6FxjL+1PGWYybhuU9n +Q2YsCQ/Br0XhJvC+i6OYgCI1iGLINTe9wjsi2ei8ZI+2G9XY62sN0orIIjIadns+ +8WGuWI9h3RBLY7aFMLpl02cXrsOiMcXC1Uk/e6e14Xpu+Y6IG4KKkUM= +=GEwA +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2021-11-10 [SC] + C5E5C09030E7BD32DF9A67CE35ABC9676004ADAE +uid [ultimate] Junru Shao +sig 3 35ABC9676004ADAE 2021-11-10 Junru Shao +sub rsa4096 2021-11-10 [E] +sig 35ABC9676004ADAE 2021-11-10 Junru Shao + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGGL/BwBEAC8krTtoeZUNgWVTEBZ8Gm77xwy0W1NjpqY6+cT01xW1vlsjMBl +MoR2bGA8aR+vNERI/CfRN8uyplWyoCK7fbnk0Rcd81nvdMqYiXWg55PetJ+oPm+B +8j26ssUe+Umg0cwa4ZUbdmicSSlousjR75XlasanrGggn1iH0ltiwvkyxKIlppo9 +UTgh6Db3sK3i+hNrwZmMliG03CpdZqh9luCQD2KaHhL2v63fzEo2mKJLHFQGYmRR +dkCvF8GNEkoyVbOVRY+jnZ97C6U4XigAwwqi7kBp9QJ5DE7xzjXwCOS2QNUzvdjF +/OE3zTVJmx5qSD9i5u69A3iXBEfVd19gCDiJIkOttgfNgKy+atK+Bmc5iM9aizCA +jZQAt0uOsPXzNkiiiJoTp6egvt05F/7Z/cy+UQZb+GQNRqMr+8Z77QjH1fAAB8qz +q+Z/W6Gazws+CiqkVrvMUKCIj3AxHWeiUDwD1KGap3WkpocEuJ2IXuYUlySDIFXv +Iigm0a0KFt8Ex4cfz3GNS6eH0bjHn6YIebIQIRRYI4kozy/JMAYJ78Tx8Rp0WY38 +85PXQZazHRriVttc8YrnK8uAHjN01COOyGkwYp20Xqw7dOoYCnbhObYvoDDHRtMm +2O7TtK6sfnyWhL9ZRGOWyqoIw+4TIh+sS0z1dj7oyWeSaPHTCbj/7CneZwARAQAB +tCFKdW5ydSBTaGFvIDxqdW5ydXNoYW9AYXBhY2hlLm9yZz6JAk4EEwEKADgWIQTF +5cCQMOe9Mt+aZ841q8lnYAStrgUCYYv8HAIbAwULCQgHAgYVCgkICwIEFgIDAQIe +AQIXgAAKCRA1q8lnYAStrinMD/96v0V5JOtvT2+NzkxyoZPFw/1H/jtAoCAm2IUq +PhUGibAPztREBcbr40I8l8bLghvN3PyNFop/TY7uxwzTzJrST1eZxML6x75pw6QK +2dbY0fFV3SEucDd8mCtVk/5F5ZWd7pXfYq4HVIcSikL0RbKHEl7N8fCRQHBQ63OA +MugeAnTfGhppQHLJQtN9iKx4iHt5aH38MMlhlzfqEwjMEfCm0OnnEWjLbjQgCFTU +1llnQEWxT1kwsiHKNvuTSuLrSP5SHsE/VGixLWUw3YzvDFrP5pmnY7XRz4jAynrS 
+QIoKnb6WtKCuos1Ym9gZIqXlPKZWfL93FBqD+lmHBMoPIVlubAOGR5scRd7sWhDd +ECnRWQZmIQ4b6g8dmcFQ/vC+1G75hr3EZEZHX6F3tS4lLHZ9NxiKK49ctD6UIJVP +4FIAOY+lB1LDVRObm4KuQ9sLO60p7Bh3xqEzqDZRLwO+z3vo7nQl+F+SwWRcI2tN +BrDaM+MDIrBiwPH79Ehi7r4fFVqzmHDvqa0eBjUnVx9g6AnlR91/4QX9ZLt+rUlg +ufJC35fUSJpRLLWUIAto8veLv7rd5mwbeocnncAXlx3+rN9NDEiFqeNeijLfv9bb +VXa7f1+vfyTn+ZrxB6vGM76bzZosJUWBhrVcq6Pv0Llxowy11z8tMBgALoKnwPhQ +KyBcNbkCDQRhi/wcARAAqA2X+BDf2aUFaMdSOGfxTf3y/moLREw0xw4zfGzpeMjY +ln+GrziX/+3bdwiDw8fwbe/r6M7jRW+66ndzI8J3qz6mpZpYbSUYdSpThqn2M/Pn +cwFjzP9hn5436MoiO+EPz7dukmXq1+a7L7arQUdpQ+LReFg31M8uDiaKmOGBibGw +2NmyD9NRsWsWn4thn4lu4ir1tSfgkSlSJPQyGF22Y1h6I5serjAbLqrXFG8+ziKv +HBXofYvQEnHynPzByJUy1CxAKojyvR+ARiSfhW2EOlB5USLjjGvgIKBko912EYU1 +s2GblBPkdBgHpMaVq4+uUdQcAvOpsscsoMMB3GQdhnMHrZGMjN+fPbMer4w721yo +495IOFGE97XSiO/1CPpVzIOPzl+QpSuRdl/GlKr30+vEUwTSXUYEbYSRMCofiRvv +63g6+dC0aN/8yVmnXCbehPu2EOmD5kl4VwrIADy7D1vIpXqetfIXPToovJo1wc/m +ZNXDXDnEImP2vQMuIb8pF/G66yfIiFTkvlORp3uA+G3wujnqq7eouseBx3vC7gap +fsSLqnMTCtZgh+qrogbeQzTNSVBQ4K6i1Ipbq+ti/ebRSMBf4WXeByD5Sk2+K1vo +5njW/8yXgg4zxpHdZo+s2RtpIzYQjjQFRFstR6RbBdcl7H348arvQiucyeYmK80A +EQEAAYkCNgQYAQoAIBYhBMXlwJAw570y35pnzjWryWdgBK2uBQJhi/wcAhsMAAoJ +EDWryWdgBK2uNdAQAI1FtRJ4mI6EOLjk9L9b/P3l5X0VY68c6eMMRc53goRr6cMj +1DlEGMSrFZ/uxadpVhdr7XZSUJy2CP8XwL7MOzkzGdshki1CgqECkkm4PPjBYUlJ +/aNPcQuaz7C6DF4X190Q81dCWG3nFzN1jJ8th+IRzTT6y1xJzMoslqeXqNf5sHyT +3tPkgLNcoFvUBmLglGlWOiuiWSkI+FFi+azGzgplPPWQiFEf47N5iEyhOLJYFkF+ +fR0u056EdTLV2pMqKU+9OEbB0gO8c2+hNXj3O/g+d2GsszrxHzLWwiX2haLfAcD8 +Eu8HBTp6nIa+q7kEAhhEoT3KPGTvIKFEtKzQmW9qa9XtEXjLHnmrMURGw1epVsE4 +/c1u5BughEZi3yw+yupnkRa7uR/IJw6Iw27OHYg9fyqMkGvfT0se9JTgud9GYggA +iaibIEq6K1sKjTE6Mk6KyGuQR6OrI7DB9HueFG6GP4UpZHXMdgqvlYXtn7iKTz8s +H/r/Ge2qzbQOFfJgZ/pI/7LL65XlgAbsSo79neztm6ExN5u9QBkpjsKYk2Gj8eny +vDrH4rzP6lkvLqCpCnOI+NHvmTpHI6XCi7XmQzBnBI7YDlNAuyx0axhkMCZs0bFx +lFYYlF0zWyTPNpVGZj3hMWq1mpsBOY4SWtN2T5gLoCGEPrgrZn1Gc4xFHnve +=jNCy +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2021-11-10 [SC] + 
43C2306EBED3D533F2CFBA8A2C75E5A496C80880 +uid [ultimate] Wuwei Lin +sig 3 2C75E5A496C80880 2021-11-10 Wuwei Lin +sub rsa4096 2021-11-10 [E] +sig 2C75E5A496C80880 2021-11-10 Wuwei Lin + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGGMC8kBEAC0Gto+SCmuyo1CxQQkUwjkSgaU8eIkTNTHi3sCCZLC8nz1W7XY +Ksf/QgZQxdA1mrI41M5OWgg5fh0iySP6aU/C/WPCm6ebe4/VAo2BTJy4a2TgM3pg +LEd63mWj8XUAXLiQY2yju3PhkY3DCfVwghc81qqm21Ny2uGYe3w56N3OSOGUnrp2 +1zcALDvYhE00fszbPlDtpZ+YRB1Xf/NBEd7TW3fLzrDP5MqdhUt0TtdpfW+at6Op +6xW3uXWWbfkXYFMaM6xdatzqovMwPMMyUi1mnPRLL1i3toezm7GSAdgWHxlZBNEU +lyyg122NuDhx5Rbri4qHTUdiM5ZwxhuehJGRV1QmpRG2n6XVjgtEICL0NjoFTcVv +g/9EqSLKKmXo2qF/ubMepngX9nDcSlLLH5zQe2hvup3FSGep+SLJ8+sMjf01H/Vo +KiTPAN/C4LRsWiJTfHrUANminORo6+FMrqougk3QVd1X7X0MQFtxV5JPj5A0YdE2 +YkJAVMCOi/YGtBpktodZBpChMojjjlb0QyWo0YCLUcBIoA/Y6vCm1Z5CRYzUdvWt +SS94viKXpNJQaSospoxk1uWeEXM6Sj4/J52HGv86gP8EZwcbSjE6yRydokgB9qyW +nuIWEKFiDl74heAL1wtpk+aS3UVjb2dInnpALmNzNMl1UYwJszViTraz0QARAQAB +tBxXdXdlaSBMaW4gPHd1d2VpQGFwYWNoZS5vcmc+iQJSBBMBCAA8FiEEQ8Iwbr7T +1TPyz7qKLHXlpJbICIAFAmGMC8kCGwMFCwkIBwIDIgIBBhUKCQgLAgQWAgMBAh4H +AheAAAoJECx15aSWyAiAKlIP/RhCvkX4evnIlgDTNVt7W/XMFUua638mAj3p752M +FnH7FU/OTySE5wc/P4LJI7kNBLC9doF6RSpjrE87lSBRhYyPU7LVlTX5j5xbt3HD +nVZWe1XAj3wORR9mYDJaUABCY21qLBY2WGDeI3qGAQ5vjw/13HoYZAKcsQ9T8FN6 +FM+T6endSJUkqKSNLw+PiUAqosqI3ZgbShleD9jdwHzNqldwGWV47wJCS1UoOfnu +2I63EluPhOO+F44KXs0mAoEQqeqpuA4oXeyGhkbePR4xGIqqCDev1Gpr3KXDE0SH +4blXbKIEqWUYU3lU3/uUs8noaARkaNYkvfyNxKXXnyVqKFAPEZkGU+Nwp4VrtVp1 +wlqmnebzxVDWpxrkQrtr2sNDSYbJPC5fQqx4DyWctPNGWDRKmac25dW5JSbkFkVY +nBqFu464LNMtNS3RUL6cegFcV6Put+wYdqzV27BOaU0nnPGOrf2zDsVZdg6msfNB +eN/gABzRgW1iCuCItkwv2uDlabW/S6EV3Rkz9EVXNNoiPC6OwjVZAPvbB5tzmA9y +gCAsUWYjWH0VR5HuNmUIu76pDuGQVz7dk3xq6P+KF7LhX07oq5wAcBjxD66tKFIj +dIMfnJqu3Uy4UkF7cExg+IlZYsYyC2nBb0o8qDI+eVCEN5iLR+fr3OFKswhqFdMt +VWGcuQINBGGMC8kBEAChBfP599l60dioP51mR4s10mifMY/Ot+E8z8oAvvq0bQky +6Y+BcOWghHQ9dKsJ+UIQJhQHGKMVqgoVIy4rC+nVXcN5tLec4b8pKESJuLdcQ7P9 
+1j03v31XvbpNmAUuUKl0xEkrHsRUlL9yfC6M8/PnZm9FImJmQWCageyl+T/zlDzy +LnZwQ7ko7mCF3haRBqCTuYpT6ICuZ0Pg/itVuje8WNkFH+kPH4Z6JlTboNoVf/UP +xcQYrnCwRtoQPdJb0jz2pTjKqtBirrKewVPE4meoZnUK6Q+h+yx36jTM9IqvP59F +/sW3kkQuHVKZj22qSyILBHxFJ1qjndjkIe5IX6w4bqXIEZWgXBJJggYqeBqytWhb +mx836Hf6oR6wlytG8M0NgkMMziPzK6hpns9swIdcPngHLn6XyNT7WxLZwMmh2xEd +P53qzyo8HAl3uIUQzz9QabOvUyEiw4PNaxyuqpPvyhXcmlRjfSs6NRceYyhXdUTA +lcKKMsZwNZ/i/rYYME5eVtEpRKmc6ZnbDRk+2la2RdJikRVzP4LAUut+yi5n/cal +qKW4685BC/aDmCWmQLAGZtSxWNBeTMnp5NpvVG/5LLSBHuraJePiOORXpFCdIira +BWsrHj1AfP831Byj53MMHS8C5Xr5J5JiQWKhxd5ASWPu4DjT3kAkRVZrvvpPfQAR +AQABiQI2BBgBCAAgFiEEQ8Iwbr7T1TPyz7qKLHXlpJbICIAFAmGMC8kCGwwACgkQ +LHXlpJbICIB7EhAAg2uspz5Vsw6QK76ipdkSAgUHeZU0MU3/af6qqrkseB1hAnck +E6fb1hUeRy4o5550eREgMi0uJDTqAoXvZ01oIKrfdZOsr1xLPHRrziBDvmSZVQmt +tIoMuEDhD8Pf7PNVemAIKQLqoleHeXKSlc1FP6DKcAIJK7jvIkb1alO9r9gXTQrM +8rHY4KSRh545HtZva6gBZjk+RfpQu6Sg/dMlwlDxTpoH0QjNalwzHD09sK9DrpOf +OhdTb3dYMBAMPyPWudUW0JbHhlJMqykCWdMSN5FxQIDcz4N4sH3idclOqBWzQq8Z +igf4cdBGaegHPGxOEMRdAKDOkVxP2ZwxJBLUFBThD/CfGRGhnwNTYoNpaPPekRPW +7Yg2JCnqI2pVGQBETX57J3wcQDb/TXQ7VP+ZttHMkGkU7IGoBdlzu9hhPapUs032 +Fy5AYoRozj9SuLKGbqy7VkvtEVZ7TeKaZO34fEJ3uRkDTHx0TQtqwvs1b3U1fWJj +o7469h/jBIPHJojx088Om0pMv91xJ7nQ3xukgVw9C0DZfmBX3xd2bNbyfugT8rqQ +A1PPxm4/KsXX/IZZOuM/tlT0vAahQsvXMNUVMg7v/PWuB6V47UdenKpXd10oloF7 +MMtVW5sxG8OoBpUIhJUCtYTlwGCyGWSR7+rsHSR2HydLk1RWcYNI3XgJ0ng= +=+gLd +-----END PGP PUBLIC KEY BLOCK----- +pub rsa3072 2022-07-12 [SC] [expires: 2024-07-11] + B3C6A14C13B8C6727BC2FD2F07FA463F1C926F48 +uid [ultimate] David Riazati +sig 3 07FA463F1C926F48 2022-07-12 David Riazati +sub rsa3072 2022-07-12 [E] [expires: 2024-07-11] +sig 07FA463F1C926F48 2022-07-12 David Riazati + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQGNBGLNzqUBDAC9p7OMYiiVHTQIIUr1/fDXaJ3sJ0rlkaPQJpPBrtuqGjN5utDu +26BWQqPxx36aABw44UmTRwV4UNf+3McYSJoCODfVpHOKsKk0Ql5CDzG3Ngpdu9ZR +UxV6s2DNHkSUjpd5vRfZF09WnQ0WITEhKz8Wnm82B/NkvRmTzYqlpP+zOT3+WPFh 
+5maMPOP0bvEfiT22zQqOOyKraYPrtf5ZBSip1fYohOlyS/aJcqOChMuKMOBVrxqH +9EmHjEkN0a+nAdWnGmCoGZONsD4ifXL17AUOaGSpEko6Nj7nXyTKI0laBhj6f8uw +v8M3xDBkIm7oiTuwrCeDa4e9YtP6Vzvj6MxrpNIMN0XRs/DRYH0lgTI1Zv/0SzkO +OAa9tOCiq95jkMjZik/vyQ55WwkMgYDmngsP/PBEW2ztdVLoLeal2p4HNfBM1BQO +RFOGnurR2Vmy1jGPyfpuBNMyjRgFC43s7SLiTYKCi1QxyY5u6dRgjIxkG+jyiY3B +GFMAtPt5iJHUox0AEQEAAbQjRGF2aWQgUmlhemF0aSA8ZHJpYXphdGlAYXBhY2hl +Lm9yZz6JAdQEEwEKAD4WIQSzxqFME7jGcnvC/S8H+kY/HJJvSAUCYs3OpQIbAwUJ +A8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAH+kY/HJJvSJHEC/wMgDH/ +jBI6AciNp9sPv2p8tFRDywq2nUOameYFaIMq1r644IAzUUU3PSACMT1lCxGjFFsE +vJYx1GORrpqjwArrK1D3zZ0yb4X38DFAU+7DTGEKzKoz3h+Ka0GyOe90CI/KqqWL +XNeePwvOzIWhZ0U8vqUkgXwHyfG1dwocEx5A1zlTeznkth2AnRELnjhFcj28V2VX +dUQmZ8qOYxXtjSk9xJtQ/BbARiNINeKqzG1aPWgjTtFFp3UTl/jWCr5RBlWMA+BU +N9alE/ozRPx89Uilz2reaC7xX8tHv5F+P7SPVwMhJyYQ7F577CtM0b4vTu4U15wE +VlWF25ymTbSt5kam9jFbeR0Zkc0/LuLEdGWRGbDFI9Hj1rGeBejTm+PjwK3TidDn +KbvpUgvseNfqUQPcbjEsuwYVUtR/LEeQxt2tK/odQwWlHR7BQApFhV7VSJVP99Fp +YNFN7AsiD7+k4fOl5Qeq/t6X7x+gXMkxsRvtJMwB/fTAWbuBxdQBdIkP/KC5AY0E +Ys3OpQEMALhC8woP92ONpgRKHhH3s65cY4EYLhfhkbOqU8KcbPJX0qx1gM68jWqm +aCvez9KO+aB2jEyWG65XsOJXM6RqFtgvFMKG+ETLIgPydqt9l4f5AhnrPXmrxf7l +b8unuFMyoga7DyKnB6hQzEVqZgbKR+U6lWaoFtGTFYlaOdUz268OErrW3592frh0 +VKTdCyBdGPfiwKnzL4+LjU7SuiI9r1nBH5ZYicGmgOKQHP0KQRUy66Cq0S7p0rpp +9owbh2FHkXJ0bryl7AMV5JurEk0FSA483qQjyqHEQCSKVySgUBBFw9UPH0LkUbYv +jk43VFoUYexlJ47KFIRJdQZdLyyqsSy0xzqiCQXFwQPECIFHN/GTMuAHcaCfah/z +u4KDkqArzNzG1pl/DYVuaMo9LmBtzB7kfxPKcvm0atp6WHydcQ92N9ZU9z2zBh7T +u6Akzl+eONsix7F0oldwtG7Glic+1HafyyjhZfV8o6r7rYURnsotDfdzYjpL/xWe +xWkUSv2GbwARAQABiQG8BBgBCgAmFiEEs8ahTBO4xnJ7wv0vB/pGPxySb0gFAmLN +zqUCGwwFCQPCZwAACgkQB/pGPxySb0g+0wv+MQO/9mVo4eblTeFMLpLlU1tbDXIF +n5bDxbd1ekq/fKLrWZpT+MQGprGMXbgTehgeBIMvFvANLr2KHUb4HpXTX1GceVHv +A5uN/JQ+/H+IF3SoipcFPDR67uESVSZQfrky6HG8M9hH4OPdW4LbyEBke13Z2LlK +sQWJFznDnqCqmvLDvvliGBGhMM3RvTn5upgA47gwcJ1Z4xZU+k1nyhAiAgxGxpjO +rtj/Dv7r7gdnDBo5omu0fQLqulSY1UeHsOQXlkR6zMOMDdKgybcScQHQhta0Hcs+ 
+DWxpfJ92vH/3wGchSA1f0Fp2WCiQ/wp7sfe1esShDN12AwlpDBjK583d0R+DLpVY +8DbRCdvtwIN2f5KD+LhBbBX66AADVKVRIPgGDRGxc85X06nVWOQGHrGD+tCjxBNM +aLLvg9K8HxeWTvQvowCAyFJo4NfIrS/7gMm5JcWMAqVFJ+IVxZNxZUIYV0VBC/AN +rSSBN90DWxIgPhlAqgO0ofkbPSVwF/9i7nd3 +=XBuV +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2022-10-10 [SC] + 1B63BD2FFF5E515DA1BEF393C9A56ABD5CCA3EB8 +uid [ultimate] Andrew Zhao Luo +sig 3 C9A56ABD5CCA3EB8 2022-10-10 Andrew Zhao Luo +sub rsa4096 2022-10-10 [E] +sig C9A56ABD5CCA3EB8 2022-10-10 Andrew Zhao Luo + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGNEkfMBEAC4sFi6Msfxtv4pahjdp+nmfwprEemP0inI4yiuT9m5eEzc4/0/ +EwPHw4Kwx4SQypxSJXqMnxSI97w/54LW0Gob9hzRwcCLCe4zPR9YnJQ0JQo5yrjE +zo9JvgyIGtGhM6rUTSMcCIO3eYb4Ogwe99DoWMz4w2NA9wB3nzkA2zL4VpaM3Ou/ +F54xlMLE7ht0EralHcHZUuSmvpzm43lScE6LwypFecNvfoBdiJJ5rGxbFMKJRGeF +GGCZPuLy3EBrPbHe8cjWfgBKNj3XTl8B8YO5oEIZZPCpo3ML8DuD1Mf93PCk8Hd/ +yr9U7VMOXrEEZpTjGOEl3oL+5VVUFFBDs2tuxeBMExC90sEkoXDtJCRkSlHHBWZy +tzTqwN9GcLe1N5YKwEphhnmN7tp5rLEJXdUasJ4RlWQHZqVJddDELLb595FYgZMe +2dBXKbXrn8NvJBJf5yeLkSJh9gdkdtXwX/YN4D70LYKLz9+XZqhg3iPLAdrY+xVN +lHtCZDKLSHpNHWPcqnBIcOre6ucBJu52S3ZoVtH/CCQrBkuVpXWSjnL1wCw1Djyx +cNSVSVR9/yZQTgcWQh1zmErQEC9mmUrhTJ19IJ4bpseWgyhIETzuSYZ0Xm7c3eT9 +FrogP/D/uCWwfb5DJUIIFBh3fkEMCSEpc4TPIjulpJL3i3FaLLN4hYURKwARAQAB +tCpBbmRyZXcgWmhhbyBMdW8gPGFuZHJld3poYW9sdW9AYXBhY2hlLm9yZz6JAk4E +EwEKADgWIQQbY70v/15RXaG+85PJpWq9XMo+uAUCY0SR8wIbAwULCQgHAgYVCgkI +CwIEFgIDAQIeAQIXgAAKCRDJpWq9XMo+uFYoD/4oALh+pdsnFwbyx1ycf3lLExwE +vMrmr+hMrodQqoqQ7bt+anYzA78v0HD3U7zsLSqhIhYE34Ib3fB5Rv7z6DcNP6pl +RUH7QU4DOyePRPRx/xYz5R3OkqVKrV7RUdzgXOn+5mujTJiRYzRbNyexg88dJVWK +eQCiNyW9j8M/+5a/+gWjehxyvSmoSv1fEFUDV7hjIinSApWyMm0Q8tzoqxPmuaTE +ll15VkgWx2t3bjQtfPCeft1eZ7Tb84k/PRN7JRFVEZYul4MtRSrJTDO1E5ewZ2qH +PV/2JQPQMFKpaDEMseoGk6/O9I76sLjTIjQ7mfFOnEBiMph2BCF/cpMufxLnE2WC +GyTCGVB+BPgvQ9kvD6rFTAHyiWetHA5Z0v/TYUYAPYIATk4N0Hop/2fx4NK4vWeX +ehjvgPzp65vRPAHiIh7JJM4yt95yMdSpo7sUuduefyMf5FgzBpjaXTb2nI5NsUOr 
+Ohh6MjaZWt1tZoNj7X81IILJJk4HDkDLpTsi8dDLPRzuHw7iNb9U0bn6cSqFW8JZ +M+U1t6jpdJ9hEDlBiJPZbH3Ndky+ZyDoQQ6zp2mGbgkrT6soFzIi2zQ55qEpnMNM +QpxR17BTJAJO6JEPIHhdovU/VDg8ho7blbhNFY/L8o72Q4RAnLW36rRBx+dsExHn +Gn6OvtU24FhEfPlWyrkCDQRjRJHzARAAvdG8QPkyHtnV4SyAgaMp6lIm31OglXQO +LFue4Xnv/UsUzXY8am281dnF7IbccnmxFxxlJq32lIav+L77I5wQUd/DuY3zj37b +RddyskOuK7m1skMXnBgJFUlfwE9H6ypr+HPy05VAnp5zsqelXhvIoJmioTFysmgi +IFZTUfV9RPp6ohO18r4Vdgyn0a/p+hCoNuxdjlZUSZ4WgY3b+11d+wcudUu2zfwc +LSuXpsp30+tox5vcn82fANux0fnxbpc8Ic00XlEQCeUphF9NxhBPGnPRQV12rBpT +eo+bOUp2UN3dEPgnYGWLBt8uuxVOr5XE1AwwlIokSdoS8zGVR8JPk+32PEW07Q4R +8t0J/MFacFlvHHpWkBBStXU2pzzLs+AX5qO7s6XekqpXdb261vSEd86jH6ndqIo0 +KSSPlUmBi4FAKHKZIUhdSM0waR9CJQfYUWGqXLJpaKqKTojqIuXQWh4S343H9IRg +n5nbihuiko8UrrzofNBb0TXfPOnYYjCB3cFTVQzIFl05aNGs5HQGLX0wbqD7+kfP +m79b6p5SWLoNLmGNLj0dDcBelw+nAPhbOIn1rohwdPPJt5gU05BPv++X6CzmqFEA +pVx01HXnbX62P2HT2V5YavLPw/R0FrXOB4ZWKH/tg+BPMBqS+E5eifadvVvKH/8w +rc0Q1UwYxB8AEQEAAYkCNgQYAQoAIBYhBBtjvS//XlFdob7zk8mlar1cyj64BQJj +RJHzAhsMAAoJEMmlar1cyj64hjMP/juNX8sFXlNCyR/HHKHwpfzn+nj6vVz3RgJi +OFhf7HYAKh37yMizF3pN7ueyV55BBiiISQNbxf5eLh6yCJ2NGkun+mTKPow5CAyB +yFS/z6zmlGduL+L8flI0Pao0UJgryhDUYkNrR5/PkZ4ksPKyI3sLlaoOPvIQAlk2 +aw1BI8RzTo05Y9OHralpFV0Nvufjvc9R0Q0934216M7NNK8nUSxXWeztM0yBHEIi +V+/XY821F+yO2aBhHqnpQeJ1+6bc3UB7sbt8xA91rJ40Kw7TS4FGbTzQyXKRBMKY +LoZVF61lRUoAFY4Fh+dRKAEel8ZnBhyHEyh5NCUWkHJNWxpPnl/XIVJZ3BbFbtfT +W/CeWBEAkrJnCl5CfpXUyZRWYk2uwR1tA7apV+zJpaPwojnY5s+2IhMPrTdkxsNR +zA4jpYkRVEwqy4LuLLbiVnTPba6y8DBiQ4by1m1CKJJJ09BMUKff5v1xSerONLBM +uEKTrz3MJLLh1sZWkTO04K2VarbWoCygydcrxc9PNOuISq2mn+g2kzVhnUG45YnQ +RRveMKZ6+uqGzsSYwp+lHNNso0ey94qgwy4qubT++rLZZ5eVqBSUWCsGoEayDBOQ +v9YZLKL6qfuWuYN7rDdY1c82kPPmjaSkpXiPP7q6v8vUOGhnMFOAUNfxwpXP5Hs9 +/lFRrVmO +=rAtV +-----END PGP PUBLIC KEY BLOCK----- + +pub rsa4096 2023-02-02 [SC] + 466390C69A21ABAE77DC63F128D4862222B8EC31 +uid [ultimate] Leandro Nunes +sig 3 28D4862222B8EC31 2023-02-02 Leandro Nunes +sub rsa4096 2023-02-02 [E] +sig 28D4862222B8EC31 2023-02-02 Leandro Nunes + 
+-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGPbm2QBEADvJpyP/sRJeI5GtkZ49uUeICGce22kzl2GtcB5eOTOJB2M3rJq +AwhdhLr1P4uIKJh3EySW5lozmY//rSwlh7lpdTmu0+o0+03UBTseKUFVGMGMdy22 +7jzOkHjViYFXl9db/BgnAz/d9AmYgTzlhTwW5kv2gl89ifrNyywome9N5CmhjzHB +5HDo47i88GQ7auLNP2DmGyAnl3Fg+B8BIVCQJchQbWRMktZUj2HfNhLT6sGMKDHb +HoPM0rzHAR9ZzOUxaCSadbJXP2v61BJ2pdVW17PB8UcXnFwCy5aax6Rn+WfNl3aD +Oj2junnBfXvqxc5GYDrporcB5+zIFYb+Ec5Mz2daWRDiEsYcmM1V9g6yb1XsxYyT +cKWhMbOlBBMJyvd38GbJuI+yraURQWLN2WyajWogD8icnaEeq2iamiIDqZ8yiLec +3TbHsl43i6UJOCJhww6SCuvaaLz3XKNVmt8m+PLmFAqjt5O5zy4cE4JGGt0U2ckQ +vlZdP6fIfPDCh2FALlohKL6s8B8mUB1QdFJGgzK4S/0+TwA2Pm+2lE6PNQ6SBNzO +Whfvb43Sll0Eb9xO1ehjE50Vv0lMn+0q8PrDL1FpRFtpqYMRUjLEDWAv6fBUBlJ2 +PrQaTLA90om8gG2sb7p1ocRc3UJdosDLVw9t/X0go70Cw+UVha7LuTgawQARAQAB +tCNMZWFuZHJvIE51bmVzIDxsZWFuZHJvbkBhcGFjaGUub3JnPokCUQQTAQgAOxYh +BEZjkMaaIauud9xj8SjUhiIiuOwxBQJj25tkAhsDBQsJCAcCAiICBhUKCQgLAgQW +AgMBAh4HAheAAAoJECjUhiIiuOwxt00QAMGaQDaC//6Vm8P8WeEBBTKGGeQhVaDI +E6JSSpKg7f+x263S6BAGAuJ7/kR+0vg4MjxcQ928PDaE+6mciqFctKCag1DNDSAv +DHrbZo8u3fAjZsLG+BZU00nCITlITTFS2stHo8DRpYTvpP30OkW7X72mkJONu+CE +902nivcsfA6tZUXENEiJFddjxy02o0hKDRCpcn+85yEIFCs8HAGHdhWeacpbhnc/ +/CYrJBeCEzANJmK8pdDkcc0bOfSd6PjcbSErhQtXydYHdcrGMCsD+pfoO9wyh3VG +h9yb9OXGS0iuK9ZIqLe1S2Qdcd1DcNhQRpFIly1ryUxXtY2db5RhpZ2dPuKTD6v7 +TnBVGwCzhmLJGG61WDRwyAGRZ6ja17gy5SGWzBaZRvVlp3d+AA8mtkxJQenrB0eE +Ch1YghlHU9KGPRl6krdHImIST4/FJKHHdA96jY/nMOtlN07kXaZqPLByQXQ6P8lB +UPtJzTT9/yOvt8y82HDy+HFnwrR9W3111tIMTDsz6iYqA3kigk+kCMSSipIyE2g8 +KcpfnE0cHZeWgZBRN3WC/+T7fDEGQdxp9Bx+Hx1bW333ecMao6XcN32JWGn8Xg3h +IRjFWF8g2kowKSKlKRPD/DRCNu3tG3N4JFAUIQvHZWd+2MAayi7vVMZlQql8t4Ns +BTxcyrhiKnm2uQINBGPbm2QBEADRhLbjhAtn80vDB0WD/OITbXSV5zLYqtQNgoLb +RD6bLMX7QVH3U/UOdlEo8RV5jlG9qRW9qF7tTPiWIr0Fyvb83qskGPRxO0xPrll3 +wqo2xqRQdSbyFPt0vNSLj9qQdXSNUenTQXoBkQgLnr/zENKN//NTavvSLmmZL9YD ++O2Oe/hcxMNrRtvMYo77l3mfIGWSBQRiP9IPjdOQZjP645CozD8dsocUDtrgmvuR +plG3PpFJidF+72aW0J2/hbNms2uDL6fEf6Nno9Bd3dqLLXSIp2I4iSodqp0IRWs8 
+gPOritMLHWVy0tFsE+xOJpjLVDAMZTqkwkPHeB1By6x30ra4m6uaost4WyGt1mRZ +zxYk3JJ+YKt232pQJLv9z3cuH37g0XuYWLCmUj6Hl6GyLvTW8NltsBq9xRWXc1il +t4P8DgZoBPGpVWxqN0gUaWxiFzjfJl0CeYfd1qSojUVSlh/BDvMPPjo8RX0wBBjV +hL9r/naBfMdOs/9ZiqBnGUUe2xICLUr0yrxLsEXvXMNEflTI5jNuYCrllqay2ZOS +lUrL9B4eTLO44k0lFt/Omh+fGgxlFYkmQT5C/JGjxes4llUXuK/z2uVSX/8RwF7f +YyHFrMUZnI6sKn6L/ula/Tlw2vHu4RkeLtB76ytxID39bn7AZAzU/I+SLMHkT6L+ +Jzs0cQARAQABiQI2BBgBCAAgFiEERmOQxpohq6533GPxKNSGIiK47DEFAmPbm2QC +GwwACgkQKNSGIiK47DE27RAA39cv2uS59dscLAlsuQ1uYhXRWyFY6sQ9Q+EeqsbE +kQRUDsRFbJC88b8wdCjYPlVK/X4qr8NMtCS5GVUXQQvHgabf/xlzwZsBIHX05KaP +J0Pwpvns2Z0cYpZUO6oBLt3MclIx9uUDU34fBE3x4QkTjx919K/aV/h4UfyfLlas +FwWd9jpCkh4SLH7BwAB598Bm0zbKIjLccqT4pg6pOXB72YWvO7jee6DBlPSvwqLC +PVSg4dDemSghdAAI2QDLCEf2m6JLdNKIElG52vKK5zEb/DW5TZnHdfGC/+hrxa+Y +ZzKDOvZVgCqh+b8PGQgFkAiY55qmsiGSTt5xmkOdnMMK+Vnr69RDwrkP0wpXKeV0 +QU/LWos45t1YsvMa0ZpO202CoYcyJ4uPkpMeTM3NCu4mpZtLEf73If/aKM3E2DB7 +76XtOm9p22qRXZmXj45sBQ4/hFNSxWrOxIMIDACSfbMt0sp0wlZku0yXx54UIrRP +g4dZ8lcqaRNBG3mb6OxcX7wXb3krbjETds1yzNhDimS4fND2JMThmqRNhvdbKaPy +eEwpq1kq8iSi4BFiOc1j6rKwIkJVlTJu/BBxs3aSo2qV+/KD7A+sTZm/oXulLFAG +FnADTX552Ddc8qX/P4jyhEA6h94c1RWUuMi8dVTDnClEOfjaSZ6cikte6UjON2V5 +tO0= +=PI53 +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2023-05-05 [SC] + 664EF29634C05669C3DCF83106D051CA84EF3749 +uid [ultimate] Siyuan Feng (CODE SIGNING KEY) +sig 3 06D051CA84EF3749 2023-05-05 Siyuan Feng (CODE SIGNING KEY) +sub rsa4096 2023-05-05 [E] +sig 06D051CA84EF3749 2023-05-05 Siyuan Feng (CODE SIGNING KEY) + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGRUb34BEACyedBJjD2tYY86mIr1OR42eR+w0dvh/cECgp4UIm5QG6z9YXrU +KjHtv426uKCloYiWU5+b8ASPMbtP5q1rvrRKQuapbDBN2qlS7E/PScFHDK50ydOA +v8melfp8pWLW48kE5EeSdvhF8U2QzEqT6EYmNIExLYjSV1+Jck40DbVL+ak/4clB +Qv/l0DW2Fw1u6GAKaNgnWgZDhc3os176rS0ERzflZeF+rqskYSb8Uy37vBB9By64 +edIsaxxOEqvpJfa2Ar/6vDnBplYSfMHq9kqs0vuAVu0r7Dadl199oVMmUwBPD4uq +IVaWoetCbR+DpVIZZ74i4kOH47xfh3kZ5zvWo5E+PL4XH/6us5tp+nAnCjhthxY2 
+tpBBZo7M4qgwNOxk/3zsysZPYPhQaZUx7/LBaApOhyWrmQYrtZpvNjHsoEToHtDj +wCSwDxf2c6mrncnmKg6UfsWSEiIiPcsYqXkp2Bh4xKzdI54qCt8LTPGKjTPj4FO/ +EccT5Ad8+lOKSOIugbNmECEbpVUhrlDa/yYtkosHbnQZFgBBaa+RCsRdbGpuuc+E +hqEpvwbPq0J8zPJpKmyeu/S5gWww29ix39J+F5ZxjQZBSUUPRsnCwHDdkgFBlDdK +ZeQqlKjr7Mfp+LlI+7HIIxO9HOPN3WIjFyWPm10d6cWHN7MDxMySP8l1+wARAQAB +tDJTaXl1YW4gRmVuZyAoQ09ERSBTSUdOSU5HIEtFWSkgPHN5ZmVuZ0BhcGFjaGUu +b3JnPokCTgQTAQoAOBYhBGZO8pY0wFZpw9z4MQbQUcqE7zdJBQJkVG9+AhsDBQsJ +CAcDBRUKCQgLBRYCAwEAAh4FAheAAAoJEAbQUcqE7zdJsZQP/0EX1XKFDF37c8cI +jQEeQ44Z3F5C6cxM7X9efqLQzDgvVRR525qjpM9uk/usUdKupwwX0GkJQu+JbRus +jFCi9RaOO88+w7ihS4qOFcXV0CXHxxSKkKfKU7DZhEprJtyQ1QE38gWlHKB/E6Y+ +oJy9EKMZMXP28gj/tUT56IABI6X+b1BSTT5PV8QURkkTPDVQDpWo/AmtPdei2bvd +KnTZGAjxv98rMYvjJUGMPx8oA3cqDRlIltgSyXlwht8Ig/wbUzW/oRmzLk3TdeoE +42/QHTLjfDlSm72V2B67QToYo/URIxJimUvg4/+VT8ByLxPzkL3jYkDh6K87JaB3 +9XO8HjNCH/xfZtMSQjmApInpk7VjDVhDC4Dlf5t41pUK5KvGsU7eLAE0jL/R/aA8 +z5pvf3afLK3Bpj2zKvy2rFkRmCKIS7mycogBUdOk4GT8ZoLDuaTmUcbfx9H4/9Zw +UYWCw6cJzD3qICqCcszlfW+99b92JddCU5ITMfwuWuY/OX/LfpwibAjzor2TFWya +CVI7kkQj9C0vcbpxgCMd4HRMV9p2CQUkvPEKfaPg+kzJC1Yz87DC6aSrLIzVvcIj +LZ2yOzR4QIeTS6hRsMmQRGPZO0KFres4760BiUCH0gid6LWDNq2YTXqdNu1ffQXe +PV8Risr23rrxOTJqlYX3GF+Xd7C1uQINBGRUb34BEAC2q4MdKGYgsl9BpvOA7TnN +kBtc8Gmg+DOdjBhG5BCo6h6U15RxfIvSikRi0Sz3F3YZymGKIeJp8ug6brY4KWjA +7dtwqlvnthyWa0mPrgHZvkIM86URO5wSvRMXx1x/qWJ8BrOoCDji+fmC3uI9IbY5 +RvkzHACYz4duZM54ZlhM6lOL3TtgF2OyXod2MFwuC5WAAPuqAG5MF+gNdf5JA+p2 +RfDIGeZNOWQVWi9CrHWt8fC80WG/7r6Ta84yV6KTfqhsXToFZICVXt2BEg7K/UzI +Ip7Zf5rKsDT0iDxtiJIbwFBbTS0hE3ICrPWVHPNRVsqp3wHDkjjnt0aPL+G938i/ +dzOwHZIf9nPjIrX94DvPpOXGrHsW6JyMHZ/3diROpWy7DzplW2i/wVftzkhT4GSN +xARgLJM/iZriOMYvHafpOm5OxkfFzeJpnjZrRTJKNCAQFbdxI9pzR/2ingvZekUs +FYcauQVuL1MbamEhf7pRXHOS0bOPAONpeXl7aNAH4jf8x/3iRHRQYcpGRT1ESddO +/swv+Cj2qj80vzF/oT/QMWrWDGHIiriRDSf1WpFHVqzki1jY6znrw6EvGsowbzha +kGC9dRzLm8P1aCwjjsU1J82yEKA4oHwAX6rlkRkWVUkTMk3G1coandbgcbnI6ngG +7V932RapIOZB06rh/6mGgQARAQABiQI2BBgBCgAgFiEEZk7yljTAVmnD3PgxBtBR 
+yoTvN0kFAmRUb34CGwwACgkQBtBRyoTvN0kLfg/+JOmX8SZLksdEo4H5KmeHVOQk +EpWlFk/SmSoxV1k+kz68B5gxBPWQwRj61cGoBFKdvP2s3BSnKy7+iow0uwh6KIy0 +zMooEOqCr/kEeFLxq79kFzxwwDSkzUO1UwGWCzVGj4V3UCq72xt2r3mJxRLNijTr +JJFe6+pLFXRbgrZ5ulWGxkiZRK007fPqtkretLiTyUXcJzU6HBUi0/pnyA2B0mWL +E0HO0TdOPIDTH/t2vtLZNhWl2T0lbjtdL7IcPAKQoNd07GyK1pPGpLQqP5gRrpFR +zrbslqRvKtpVjL5iPQCtv8Tc9ovOIQVRXzuXm9W1OtgFVYH2GQe+vbpYhLL7dOfF +Cwo6YLQnc2DRLAffy6G5weLJYYE52gMzb8z++Ys0A+XBkuVvHX1EBeyWy+OSuFSl +Aujm/jMMgK7dtNIHXgtVGEAKKJb4amc1wsZ9dmUyf1UyFxWlDUEaLq8+5Ut8a+tW +pTwwQLAXPVElpi32gLDP2rvHzIw1Hs0MpoxwOOjH/QCeRQ/V3acAUVv1JX96On0t +NqlR/Q5S24ktyC1uy9oLdIZmgllKUb8i6s6+XSkWRata3HTsfySDXMdntZV1Zrjx +WTgrESErlqNLN5ZTTW/1jBELJCfJKxgHUip+Yo6qNZoWwNLP1BaIcoA3miSG3DXf +wS/UuN04NxDy7V6mPXE= +=MTba +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2024-01-15 [SC] + A4D9228E55761E665BF01CBB5CE869CB7DEC048C +uid [ultimate] Star Yuan (CODE SIGNING KEY) +sig 3 5CE869CB7DEC048C 2024-01-15 Star Yuan (CODE SIGNING KEY) +sub rsa4096 2024-01-15 [E] +sig 5CE869CB7DEC048C 2024-01-15 Star Yuan (CODE SIGNING KEY) + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGWlStcBEADaslyfbUNARhWftJoRAChoak0cFU6NxahhvyZfyTGtSuwuHNDD +2eyvhnDIaYXVClxoNgikiQ5Nkd1jtbA4rFCw6Pdbq+98fkpcr8N4o+jlbpu6Ff3j +dJ2Qu000MV5qe9FZ4QasdfglJElvizgfNbJv/Fz1ERl/BS1U0c7lyQF9jGGh7EY2 +1y+JFp5OMG6A9SpfaOd+iOw5/cfCQk8+sHQC4dp3hOJPK4NLvjotK+hlOhRsF7gU +goYYT2IP56kPQb6U/Uiv4/R6HbKugzqSMl6BMwAb9uG6UX0xUfAA8ciHoaITCJCQ +9e/jGWnDnqYlAMNqLkHEmW7THxJ3hHXcac/Z1C3PeLDJU0rpTxDcjuYkM5jFCu7H +TgT7lWBP/PyAAVSsLqMQbLJOWm0a14tb/oRoeYr/B2prIbJY5qJBM1nherKGMg0G +7Oqugo6A1VqgUxg7Chj73PledaNwvm5Lxpl6D+wPDSifhlz0vnwOCMoOon0pTjK4 +DXDEXnEXZtzkZgXI6g7AkVyt0gkqyUi+01ibmlBfcVHh3PVvU4oNdkaywQd5s29R +DsA4WOqt9cLv+iqIzM1juygfR6ooA1jHDIyIPmmC/kOrcxKXEFvIGXDDCbXAvdXc +uXgZeZqI3pbKjQaU3fF8HwJ956HTM8rywtVGH9BWRl/i6qn5sq9CcukcuQARAQAB +tDBTdGFyIFl1YW4gKENPREUgU0lHTklORyBLRVkpIDx5c2gzMjlAYXBhY2hlLm9y +Zz6JAk4EEwEKADgWIQSk2SKOVXYeZlvwHLtc6GnLfewEjAUCZaVK1wIbAwULCQgH 
+AgYVCgkICwIEFgIDAQIeAQIXgAAKCRBc6GnLfewEjBAiD/0cfaYfQ0DL7CPsP0lS +yezPDDTnDPIo//G1cuSYG0gnXQ1SpbJSzDE7deew+P506/sWFneOY5Kuv6DuSE8J +nM6vv1EYR4/9x/XstA4F04lQPngKKBV+UKrWj8zIA2Drn345Ece1150bWvrUD7mT ++ps1gfe8SGYpOmR/kRc8qra2zizcWBC1Dl4qd+RcY7Ac6Cu3G/JG2KvZnrUSVev9 +nzSl2V0JtFVIla2odSJqv0Zdj5E2vLvQd3Dxbf3BODCdL3iQqxrQhj+0T3QLEhPg +y2XOtqW7a96XosoQ44wUiHaS5LwFViG8LoiPADtSdXYb8m4FtMfB8t4mzXVqBjpz +2csMqOnNvo7bctfpJkjM14UKib39MR2wUv9fD6Qa+OAAIeXGTQH+wlXmlYjji9+A +4tgq/+d75qUC/tyHSgbZLNXobHF8v77g60cBvFXVL02W53xhVDZP4gwu5iSSN8BJ +a2hqwo4UO53mRUNkwFZONYxJE7MhLl22r08eu0xNYhoGtpHzDVoyHg26+2FUgFDd +TNsdqjMyJ+3GXEE3PdKVDTj9To+RoHLuCczk5uvtFYGhseRwIWbVhmTLKUL+wgSa ++b90slkv+CBJvLjvKbVCmCLXwiH8Cx+MZSu0oM5v8fbHuWOhkb7bJd1V+U7qV/OA +CCqBICt64F+ooQ0oEdC0oLvr2LkCDQRlpUrXARAA1DKsF2ZNUdPIn4VcsjRk/+qF +13VC9SaqMp+J+8m1XTIeXdr27uUa2vT4j8pAM4gwMVkpEqE0rmHK+S1SeEAlcizC +Bvp7vvso/glcOg9Sgt9PXvvEDPL/Hnsn1+3YX+Gye4cOTiDDgVW1RKcgGj9Xsir+ +5BS9Secj5CGo92cuaqIo/mMjxGlsuW/LvTU5qQhz7aOaBibe5EHPlGMqM6XJN0BZ +MHRfBiGDs2n/egMnTPL0JcTlAeird+yxDPULKzhQWkd8rfQKpwcRiY6IcYFHlWdM +VhZkXNRrxh6+q3rR7FKmxlvG/12YyT6Y1BocGLgROzKIeoEp+6vsU5LJ90jy82ig +oGSHwNjm2RRukjV3eebovl1dCo6IaI/j4idCv7NlcBnln/Unk4YOZbneMT5r+3Zy +Q4azLB8KHfHOrUwAxRAGPygdLtqbjs4mF45HDe6h3IOVoiOQlZNpesrwEumlK+Il +taU0T8hfxyMpIcTLUZpIddSxo0sVby2XZ+z00En3JvtqbpRcfA87thxpsE7uHxwT +YT8mPPDxo1R4I4LSzsDnekD8EB/7woz4n5I1RBoPB1LSoo0B2os+4vHGkiwZ0TN0 +ICcUYdM623Bv2wJQbVKEDvwjHZTkotjLx7R2lyqMRwFYrMXHxevOfbARJQCqrcY2 +ouLzQme9rE5MPQbKj2cAEQEAAYkCNgQYAQoAIBYhBKTZIo5Vdh5mW/Acu1zoact9 +7ASMBQJlpUrXAhsMAAoJEFzoact97ASMNsIP/3tlsvwUVfy19lUjxWT4rPw2GGz8 +lbPiaetgigK1F1rlzYnIVo32Fcj/GNNwWEdxxEzeaQR/AJmZLWB8sBDThoTGeSDK +fjKXeDjZh+ElpIKWyk7f3ddHN2TpBz698kZ7fYCciRE9T4d3xgbqx2rCfupxUFSj +lxLFRkasByJnLdAZI50NZjW838IHMaGsvgbWEqRuvKZOES6gFhrK1NTSxj5iuiHk +Uxj1KzMhOW+m1eZ0pQcCVXJDY6KYhmrZzw9q6kzSO9ukmS5yRf0EnD7Fsca4iIXP +Y28xs3zBxYHV4IGU1PtcIwNewmTnjnEy0apHPz0zDplHi1meXuhA7bBMjs/AouJg +6FIDNSQqDuFXufqvVQ6LZZgob+LklMAoGcka4/5ZLPjipj5SWNeZZunJujSqWK7f 
+KJaIfn7ILXqxjaTFrjBN3cm60rO1+zEektrjtWMmSBn0L76pY2ucenrqewruYYdD +12VQra/6QAS5R0HG8gzOfsZcrHaiIuLoTbsOgnqLVcdb9lO7f3oMbKPwejZ5yhyz +SraXHvmixlhf4uUYwsWyhw3UgHrv1psB8Z9NfdH9/T2BvRg0qy6ZmI0n0OagPNgz +v+SZrqrWkSjyPdl6j7x8EmePfNidqw/CnncYI2rEVSmP28W0Uhg5JLgroGYmycv6 +HeZaRpYvkV8UNmnE +=BtHq +-----END PGP PUBLIC KEY BLOCK----- +pub rsa4096 2024-03-10 [SC] + 298A8AA3D25AFD95D5C89C63C8815953907B66AD +uid [ultimate] Ruihang Lai (CODE SIGNING KEY) +sig 3 C8815953907B66AD 2024-03-10 [self-signature] +sub rsa4096 2024-03-10 [E] +sig C8815953907B66AD 2024-03-10 [self-signature] + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGXt0EYBEADZTOhlwl9kdTQZz/Opt7Kso4OtZ0LqdT9O1wvB2VYODsOXjc00 +DRwbwB1K2hVjVPGJpe9aAz+BMOfxox+Ncs5a4x97xn360gKra32mvfsAnQD+g0aU +TmVWU/bhQvPSjlEYrUrvkcGClJ5QipxUWb31HNle15PJBB06XShA4GLBIhElMR2S +6H0EkghpWhfqnAjDXrEBTVsLm2wUFQUXXdqG5+CxtSKi3ywxkMyrPS5ubnylQDlg +lkQkAKtzcGcyMGoTEuP/oh7LnUOFbzoUF8lg87Y3z3ERzxntmrTfNQhkHBI6izgk +mTAmUjnm9wpgr7NyTv6HRa5UVVoDmvENxDaYa1FHt+N8NLQPHZp4Ty5tsUpcY2fl +1FGS1Kmao6cHZFJy65eUPrC96PrLCN7OTL1GblVlFzsMcjLBUxXvTs/nsFcAIMZl +OQrpEnjRKyXE3gKLQd1On+aI7oZknND0K4cJMy7n99R62/yC2UFodGG8hNxwTO2l +5nEfpYBq8hOfgWRqnhoJTJybowzv+aK0Gq/52cPxi69xEZEyGXf0XD5XMmfzmrun +lNRRogq4WBPdT7bRZUHJS6ytVg7TuS4LEhlualxasNi8PkZuGHXssUOXSbY8Gjyd +IJIdvTjkClTobCiJFyC72fBcRQVaCRZAVaAlzIUXDHVTEddB44IPXqrboQARAQAB +tDRSdWloYW5nIExhaSAoQ09ERSBTSUdOSU5HIEtFWSkgPHJ1aWhhbmdsQGFwYWNo +ZS5vcmc+iQJOBBMBCgA4FiEEKYqKo9Ja/ZXVyJxjyIFZU5B7Zq0FAmXt0EYCGwMF +CwkIBwMFFQoJCAsFFgIDAQACHgUCF4AACgkQyIFZU5B7Zq1lghAAt5H5wX1/2CIj +Gq4P17OZxmwkgxEP7E88PNu3s0AAFkk0qokuzy7fozGsQxjPdWUOqmZo+CdGQLn8 +kdUX19OQKC31alMzUKBOVecHezWAdMurb+s6rgXcwk+bMTgrg+i5Xhx0D+JjvYrC +GOHPaxdvF4/rypvPakBrk+ELt04AzHGEN1bGlSMXTrhgtAB60+bpDSqSk4gR6U11 +ks+iv463YhC2oOiSPQpWOlXHBBr976doLVCJnQpare6cdR+8ZZead1qlIRmVSL7A +r2/oEFHyVjGD0IRHP48xdHUlpG6crZwCr9hbsvoCxl4X6Td90SaM9EU9SL9uqQuh +xgh5WPGwYpbotYKpRApkJ5bdnaRhxwRWwS3tSAY8O952vDxkU6YIAGQhGg8sEI/i 
+W+DjlvzTK3ttXBXp2L3PM+jq6xyUxJbdaxfH0sFb4cVNQ1zrqBUe1VVZNqG5RRo3 +kRmsIWts7Nhu918bDKzJF5OM+Npk41mxG5X8t0FC0rc8sdea4AGcbg3/4IBaXGwk +k96J7FCmj9lgKVAZLxUjNgyeTJEG5uXSXxdqmsbv5Hc6GEyixcIvKAyXoGGnYKqs +9NqPynF139I9cjCKRwJfujtH/gCQ7Tr9i8j936sF0S4oSEZxB1TtYzwsxIgnDbyo +LZp0IYiRmWGeI5cpuQvOQ48Hoa6szZK5Ag0EZe3QRgEQALjd80At9uYE+qJM++ZR +vQ1np3p05pUQKvkiG3DUHKZi3ojypeIiyXod1+OQ1+VE4dlAU+XjlptebBa7nl6G +7eMV4sqAbRe25BLYfrbmszfGDij0+T2k2WHaWYDY8QT0IOjAGpdB2KTymiGIcTLv +zWlFdd0Y+3Pd8zBweCDOp6igDEnbzOj7uAAosZ9OI6Ufti5JZZGxCGbzENjqve0r +wUTI/f4X11sJakTxw0k0sEJcUlKyylXbpTetgPurbec5YhboAoTRDjA8R6r+jrmu +LGmP9tDRviGCou1VnOnTtS6ojr/7y7X6eX1gGCqWdLwMFde+aZyhJTfmVrYGDMh0 +m02rEUGkYnn/O79dXn8EbWsXnypVgW4DDzQAXH2b3m9b4pUsQrBfmXvtQQH2id18 +TD4IodtfZQKyjex8RBt5iYL+fQs/WfP3EP0sBlKVN4wllK5CzqRc5OvgxiZdajwX +crjAC4DMHPfSfmKuIFDTXRvKU3/rITCZwoFEzrrHCVLS+KqcJyc7G5FPGuJNpN2N +o1HGTU9qjeXIGy12CluJqqCR0EnD61yUhqVo8WndOjIFtPoef3qKRqAfxwZSe85X +yKHi0mVpb1JwqZM6jVDYVZksG1E10sAkhsiidanM59jmydIIq5C3ouvFN2ioSPMK +appJeRf1nGYaQeHdc+7kUHr9ABEBAAGJAjYEGAEKACAWIQQpioqj0lr9ldXInGPI +gVlTkHtmrQUCZe3QRgIbDAAKCRDIgVlTkHtmrbaAEADD/HWvPbwwmEt9pnUYBppj +mV9086uxJ8Pk+R8f5Mt17xkhC1wEhEuwo++uA569uGUQjPXiuUK93laHL3Y8ov/H +yYyQaNtFuoH3P83MinErXixTZ830x7eBabOpSZnm4GngUxUusUJfhrdznsHJTZ4z +xnBwnrXxAU1o3EVa9Wiy5m4bZiNoezw8P0lUbYUFWESD02n7kp7X7xdJ5w1F9p2O +xiclqs3LxsXdCQHtArsgPm9HPsoaJwjH2npZo0lc+214rm/d0LNjbLNz/riZui2H +Q3uVXxUSSO00vAmDUmYAU5Ym4E3eOsmZ9WSaS6QZPh77ATPGV7SVix32/fH0hgR1 +53Hpt9WKoavnNiNJHY05Ee1F4mbhOxKpr1lPPh5vK7vktn0ax+CwXY02izuT7SbE +Lgr+7cLYMrH/+Uu5JZRx0/4e2qCM4CU8gSwh8zl49VykvcIeS4gc8lyH13Hbr29C +DRwDSEzQ/xvG1Br1PJoqgtoz97+lNmMxNZv6NXLVe2OTiPAFJZfV2MCvd1rFN+2t +xDAUVNrnujLQRhYBSxtwfxmU1uOAnZ+cQVfOjefvZ7paGoIRHR3bDFuFJgzscrqA +zLCYllQ1hBsiHn1VM9W0v4lN1uKH/4xRegIoxbRp6VDqQzbGUxeTzayotRc+ZMf/ +2KO2FSofA649SDc2HheDeQ== +=yNdl +-----END PGP PUBLIC KEY BLOCK----- diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/LICENSE b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/LICENSE new 
file mode 100644 index 0000000000000000000000000000000000000000..82c7871cc65b11ab1ab69f221d228fde06ff0c0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/LICENSE @@ -0,0 +1,251 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------------ +This product bundles various third-party components under other open source licenses. +This section summarizes those components and their licenses. See licenses/ +for text of these licenses. 
+ + +Apache Software Foundation License 2.0 +-------------------------------------- + +3rdparty/dlpack +3rdparty/dmlc-core +3rdparty/OpenCL-Headers +3rdparty/mlperftiny +3rdparty/nvbench (with LLVM exception) +3rdparty/cutlass_fpA_intB_gemm +3rdparty/tensorrt_llm + +BSD 2-clause License +-------------------- + +3rdparty/picojson +3rdparty/dmlc-core/include/dmlc/concurrentqueue.h + + +BSD 2-clause License + zlib License +----------------------------------- + +3rdparty/dmlc-core/include/dmlc/blockingconcurrentqueue.h + + +MIT License +----------- + +3rdparty/libcrc +3rdparty/cma +3rdparty/compiler-rt/builtin_fp16.h +3rdparty/cnpy + +The Unlicense +------------- + +3rdparty/rang + +BSD 3-Clause "New" or "Revised" License +--------------------------------------- + +3rdparty/cutlass +3rdparty/libbacktrace +3rdparty/libflash_attn diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/README.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e21a7ca21412e9e32f07e703972614a40e965944 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/README.md @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + + + Open Deep Learning Compiler Stack +============================================== +[Documentation](https://tvm.apache.org/docs) | +[Contributors](CONTRIBUTORS.md) | +[Community](https://tvm.apache.org/community) | +[Release Notes](NEWS.md) + +[![Build Status](https://ci.tlcpack.ai/buildStatus/icon?job=tvm/main)](https://ci.tlcpack.ai/job/tvm/job/main/) +[![WinMacBuild](https://github.com/apache/tvm/workflows/WinMacBuild/badge.svg)](https://github.com/apache/tvm/actions?query=workflow%3AWinMacBuild) + +Apache TVM is a compiler stack for deep learning systems. It is designed to close the gap between the +productivity-focused deep learning frameworks, and the performance- and efficiency-focused hardware backends. 
+TVM works with deep learning frameworks to provide end to end compilation to different backends. + +License +------- +TVM is licensed under the [Apache-2.0](LICENSE) license. + +Getting Started +--------------- +Check out the [TVM Documentation](https://tvm.apache.org/docs/) site for installation instructions, tutorials, examples, and more. +The [Getting Started with TVM](https://tvm.apache.org/docs/tutorial/introduction.html) tutorial is a great +place to start. + +Contribute to TVM +----------------- +TVM adopts apache committer model, we aim to create an open source project that is maintained and owned by the community. +Check out the [Contributor Guide](https://tvm.apache.org/docs/contribute/). + +Acknowledgement +--------------- +We learned a lot from the following projects when building TVM. +- [Halide](https://github.com/halide/Halide): Part of TVM's TIR and arithmetic simplification module + originates from Halide. We also learned and adapted some part of lowering pipeline from Halide. +- [Loopy](https://github.com/inducer/loopy): use of integer set analysis and its loop transformation primitives. +- [Theano](https://github.com/Theano/Theano): the design inspiration of symbolic scan operator for recurrence. \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/build/config.cmake b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/build/config.cmake new file mode 100644 index 0000000000000000000000000000000000000000..b8b32cdc7097a95741b6795c72e6730503e002d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/build/config.cmake @@ -0,0 +1,443 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#-------------------------------------------------------------------- +# Template custom cmake configuration for compiling +# +# This file is used to override the build options in build. +# If you want to change the configuration, please use the following +# steps. Assume you are on the root directory. First copy the this +# file so that any local changes will be ignored by git +# +# $ mkdir build +# $ cp cmake/config.cmake build +# +# Next modify the according entries, and then compile by +# +# $ cd build +# $ cmake .. +# +# Then build in parallel with 8 threads +# +# $ make -j8 +#-------------------------------------------------------------------- + +#--------------------------------------------- +# Backend runtimes. 
+#--------------------------------------------- + +# Whether enable CUDA during compile, +# +# Possible values: +# - ON: enable CUDA with cmake's auto search +# - OFF: disable CUDA +# - /path/to/cuda: use specific path to cuda toolkit +set(USE_CUDA OFF) + +# Whether to enable NCCL support: +# - ON: enable NCCL with cmake's auto search +# - OFF: disable NCCL +# - /path/to/nccl: use specific path to nccl +set(USE_NCCL OFF) + +# Whether enable ROCM runtime +# +# Possible values: +# - ON: enable ROCM with cmake's auto search +# - OFF: disable ROCM +# - /path/to/rocm: use specific path to rocm +set(USE_ROCM OFF) + +# Whether to enable RCCL support: +# - ON: enable RCCL with cmake's auto search +# - OFF: disable RCCL +# - /path/to/rccl: use specific path to rccl +set(USE_RCCL OFF) + +# Whether enable SDAccel runtime +set(USE_SDACCEL OFF) + +# Whether enable Intel FPGA SDK for OpenCL (AOCL) runtime +set(USE_AOCL OFF) + +# Whether enable OpenCL runtime +# +# Possible values: +# - ON: enable OpenCL with OpenCL wrapper to remove dependency during build +# time and trigger dynamic search and loading of OpenCL in runtime +# - OFF: disable OpenCL +# - /path/to/opencl-sdk: use specific path to opencl-sdk +set(USE_OPENCL OFF) + +# Wheather to allow OPENCL cl_mem access to host +# cl_mem will be allocated with CL_MEM_ALLOC_HOST_PTR +# OpenCLWorkspace->GetHostPtr API returns the host accessible pointer +set(USE_OPENCL_ENABLE_HOST_PTR OFF) + +# Whether enable Metal runtime +set(USE_METAL OFF) + +# Whether enable Vulkan runtime +# +# Possible values: +# - ON: enable Vulkan with cmake's auto search +# - OFF: disable vulkan +# - /path/to/vulkan-sdk: use specific path to vulkan-sdk +set(USE_VULKAN OFF) + +# Whether to use spirv-tools.and SPIRV-Headers from Khronos github or gitlab. 
+# +# Possible values: +# - OFF: not to use +# - /path/to/install: path to your khronis spirv-tools and SPIRV-Headers installation directory +# +set(USE_KHRONOS_SPIRV OFF) + +# whether enable SPIRV_KHR_DOT_PRODUCT +set(USE_SPIRV_KHR_INTEGER_DOT_PRODUCT OFF) + +# Whether enable OpenGL runtime +set(USE_OPENGL OFF) + +# Whether enable MicroTVM runtime +set(USE_MICRO OFF) + +# Whether enable RPC runtime +set(USE_RPC ON) + +# Whether to build the C++ RPC server binary +set(USE_CPP_RPC OFF) + +# Whether to build the C++ native runtime tool binary +set(USE_CPP_RTVM OFF) + +# Whether to build the iOS RPC server application +set(USE_IOS_RPC OFF) + +# Whether embed stackvm into the runtime +set(USE_STACKVM_RUNTIME OFF) + +# Whether enable tiny embedded graph executor. +set(USE_GRAPH_EXECUTOR ON) + +# Whether enable tiny graph executor with CUDA Graph +set(USE_GRAPH_EXECUTOR_CUDA_GRAPH OFF) + +# Whether enable pipeline executor. +set(USE_PIPELINE_EXECUTOR OFF) + +# Whether to enable the profiler for the graph executor and vm +set(USE_PROFILER ON) + +# Whether enable microTVM standalone runtime +set(USE_MICRO_STANDALONE_RUNTIME OFF) + +# Whether build with LLVM support +# Requires LLVM version >= 4.0 +# +# Possible values: +# - ON: enable llvm with cmake's find search +# - OFF: disable llvm, note this will disable CPU codegen +# which is needed for most cases +# - /path/to/llvm-config: enable specific LLVM when multiple llvm-dev is available. 
+set(USE_LLVM OFF) +set(HIDE_PRIVATE_SYMBOLS ON) + +# Whether use MLIR to help analyze, requires USE_LLVM is enabled +# Possible values: ON/OFF +set(USE_MLIR OFF) + +#--------------------------------------------- +# Contrib libraries +#--------------------------------------------- +# Whether to build with BYODT software emulated posit custom datatype +# +# Possible values: +# - ON: enable BYODT posit, requires setting UNIVERSAL_PATH +# - OFF: disable BYODT posit +# +# set(UNIVERSAL_PATH /path/to/stillwater-universal) for ON +set(USE_BYODT_POSIT OFF) + +# Whether use BLAS, choices: openblas, atlas, apple +set(USE_BLAS none) + +# Whether to use MKL +# Possible values: +# - ON: Enable MKL +# - /path/to/mkl: mkl root path +# - OFF: Disable MKL +# set(USE_MKL /opt/intel/mkl) for UNIX +# set(USE_MKL ../IntelSWTools/compilers_and_libraries_2018/windows/mkl) for WIN32 +# set(USE_MKL ) if using `pip install mkl` +set(USE_MKL OFF) + +# Whether use DNNL library, aka Intel OneDNN: https://oneapi-src.github.io/oneDNN +# +# Now matmul/dense/conv2d supported by -libs=dnnl, +# and more OP patterns supported in DNNL codegen(json runtime) +# +# choices: +# - ON: Enable DNNL in BYOC and -libs=dnnl, by default using json runtime in DNNL codegen +# - JSON: same as above. +# - C_SRC: use c source runtime in DNNL codegen +# - path/to/oneDNN:oneDNN root path +# - OFF: Disable DNNL +set(USE_DNNL OFF) + +# Whether use Intel AMX instructions. 
+set(USE_AMX OFF) + +# Whether use OpenMP thread pool, choices: gnu, intel +# Note: "gnu" uses gomp library, "intel" uses iomp5 library +set(USE_OPENMP none) + +# Whether use contrib.random in runtime +set(USE_RANDOM ON) + +# Whether use NNPack +set(USE_NNPACK OFF) + +# Possible values: +# - ON: enable tflite with cmake's find search +# - OFF: disable tflite +# - /path/to/libtensorflow-lite.a: use specific path to tensorflow lite library +set(USE_TFLITE OFF) + +# /path/to/tensorflow: tensorflow root path when use tflite library +set(USE_TENSORFLOW_PATH none) + +# Required for full builds with TFLite. Not needed for runtime with TFLite. +# /path/to/flatbuffers: flatbuffers root path when using tflite library +set(USE_FLATBUFFERS_PATH none) + +# Possible values: +# - OFF: disable tflite support for edgetpu +# - /path/to/edgetpu: use specific path to edgetpu library +set(USE_EDGETPU OFF) + +# Possible values: +# - ON: enable cuDNN with cmake's auto search in CUDA directory +# - OFF: disable cuDNN +# - /path/to/cudnn: use specific path to cuDNN path +set(USE_CUDNN OFF) + +# Whether use cuBLAS +set(USE_CUBLAS OFF) + +# Whether use MIOpen +set(USE_MIOPEN OFF) + +# Whether use MPS +set(USE_MPS OFF) + +# Whether use rocBlas +set(USE_ROCBLAS OFF) + +# Whether use contrib sort +set(USE_SORT ON) + +# Whether to use Arm Compute Library (ACL) codegen +# We provide 2 separate flags since we cannot build the ACL runtime on x86. +# This is useful for cases where you want to cross-compile a relay graph +# on x86 then run on AArch. +# +# An example of how to use this can be found here: docs/deploy/arm_compute_lib.rst. +# +# USE_ARM_COMPUTE_LIB - Support for compiling a relay graph offloading supported +# operators to Arm Compute Library. OFF/ON +# USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR - Run Arm Compute Library annotated functions via the ACL +# runtime. 
OFF/ON/"path/to/ACL" +set(USE_ARM_COMPUTE_LIB OFF) +set(USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR OFF) + +# Whether to build with Arm Ethos-N support +# Possible values: +# - OFF: disable Arm Ethos-N support +# - path/to/arm-ethos-N-stack: use a specific version of the +# Ethos-N driver stack +set(USE_ETHOSN OFF) +# If USE_ETHOSN is enabled, use ETHOSN_HW (ON) if Ethos-N hardware is available on this machine +# otherwise use ETHOSN_HW (OFF) to use the software test infrastructure +set(USE_ETHOSN_HW OFF) + +# Whether to build with Arm(R) Ethos(TM)-U NPU codegen support +set(USE_ETHOSU OFF) + +# Whether to build with CMSIS-NN external library support. +# See https://github.com/ARM-software/CMSIS_5 +set(USE_CMSISNN OFF) + +# Whether to build with TensorRT codegen or runtime +# Examples are available here: docs/deploy/tensorrt.rst. +# +# USE_TENSORRT_CODEGEN - Support for compiling a relay graph where supported operators are +# offloaded to TensorRT. OFF/ON +# USE_TENSORRT_RUNTIME - Support for running TensorRT compiled modules, requires presense of +# TensorRT library. 
OFF/ON/"path/to/TensorRT" +set(USE_TENSORRT_CODEGEN OFF) +set(USE_TENSORRT_RUNTIME OFF) + +# Whether use VITIS-AI codegen +set(USE_VITIS_AI OFF) + +# Build Verilator codegen and runtime +set(USE_VERILATOR OFF) + +#Whether to use CLML codegen +set(USE_CLML OFF) +# USE_CLML_GRAPH_EXECUTOR - CLML SDK PATH or ON or OFF +set(USE_CLML_GRAPH_EXECUTOR OFF) + +# Build ANTLR parser for Relay text format +# Possible values: +# - ON: enable ANTLR by searching default locations (cmake find_program for antlr4 and /usr/local for jar) +# - OFF: disable ANTLR +# - /path/to/antlr-*-complete.jar: path to specific ANTLR jar file +set(USE_ANTLR OFF) + +# Whether use Relay debug mode +set(USE_RELAY_DEBUG OFF) + +# Whether to build fast VTA simulator driver +set(USE_VTA_FSIM OFF) + +# Whether to build cycle-accurate VTA simulator driver +set(USE_VTA_TSIM OFF) + +# Whether to build VTA FPGA driver (device side only) +set(USE_VTA_FPGA OFF) + +# Whether use Thrust +set(USE_THRUST OFF) + +# Whether use cuRAND +set(USE_CURAND OFF) + +# Whether to build the TensorFlow TVMDSOOp module +set(USE_TF_TVMDSOOP OFF) + +# Whether to build the PyTorch custom class module +set(USE_PT_TVMDSOOP OFF) + +# Whether to use STL's std::unordered_map or TVM's POD compatible Map +set(USE_FALLBACK_STL_MAP OFF) + +# Whether to enable Hexagon support +set(USE_HEXAGON OFF) +set(USE_HEXAGON_SDK /path/to/sdk) + +# Whether to build the minimal support android rpc server for Hexagon +set(USE_HEXAGON_RPC OFF) + +# Hexagon architecture to target when compiling TVM itself (not the target for +# compiling _by_ TVM). This applies to components like the TVM runtime, but is +# also used to select correct include/library paths from the Hexagon SDK when +# building runtime for Android. +# Valid values are v65, v66, v68, v69, v73. 
+set(USE_HEXAGON_ARCH "v68") + +# Whether to use QHL library +set(USE_HEXAGON_QHL OFF) + +# Whether to use ONNX codegen +set(USE_TARGET_ONNX OFF) + +# Whether enable BNNS runtime +set(USE_BNNS OFF) + +# Whether to build static libtvm_runtime.a, the default is to build the dynamic +# version: libtvm_runtime.so. +# +# The static runtime library needs to be linked into executables with the linker +# option --whole-archive (or its equivalent). The reason is that the TVM registry +# mechanism relies on global constructors being executed at program startup. +# Global constructors alone are not sufficient for the linker to consider a +# library member to be used, and some of such library members (object files) may +# not be included in the final executable. This would make the corresponding +# runtime functions to be unavailable to the program. +set(BUILD_STATIC_RUNTIME OFF) + +# Caches the build so that building is faster when switching between branches. +# If you switch branches, build and then encounter a linking error, you may +# need to regenerate the build tree through "make .." (the cache will +# still provide significant speedups). +# Possible values: +# - AUTO: search for path to ccache, disable if not found. +# - ON: enable ccache by searching for the path to ccache, report an error if not found +# - OFF: disable ccache +# - /path/to/ccache: use specific path to ccache +set(USE_CCACHE AUTO) + +# Whether to use libbacktrace to supply linenumbers on stack traces. +# Possible values: +# - ON: Find libbacktrace from system paths. Report an error if not found. +# - OFF: Don't use libbacktrace. +# - /path/to/libbacktrace: Looking for the libbacktrace header and static lib from a user-provided path. Report error if not found. +# - COMPILE: Build and link to libbacktrace from 3rdparty/libbacktrace. +# - AUTO: +# - Find libbacktrace from system paths. +# - If not found, fallback to COMPILE on Linux or MacOS, fallback to OFF on Windows or other platforms. 
+set(USE_LIBBACKTRACE AUTO) + +# Whether to install a signal handler to print a backtrace on segfault. +# Need to have USE_LIBBACKTRACE enabled. +set(BACKTRACE_ON_SEGFAULT OFF) + +# Whether to enable PAPI support in profiling. PAPI provides access to hardware +# counters while profiling. +# Possible values: +# - ON: enable PAPI support. Will search PKG_CONFIG_PATH for a papi.pc +# - OFF: disable PAPI support. +# - /path/to/folder/containing/: Path to folder containing papi.pc. +set(USE_PAPI OFF) + +# Whether to use GoogleTest for C++ unit tests. When enabled, the generated +# build file (e.g. Makefile) will have a target "cpptest". +# Possible values: +# - ON: enable GoogleTest. The package `GTest` will be required for cmake +# to succeed. +# - OFF: disable GoogleTest. +# - AUTO: cmake will attempt to find the GTest package, if found GTest will +# be enabled, otherwise it will be disabled. +# Note that cmake will use `find_package` to find GTest. Please use cmake's +# predefined variables to specify the path to the GTest package if needed. 
+set(USE_GTEST AUTO) + +# Enable using CUTLASS as a BYOC backend +# Need to have USE_CUDA=ON +set(USE_CUTLASS OFF) + +# Enable to show a summary of TVM options +set(SUMMARIZE OFF) + +# Whether to use LibTorch as backend +# To enable pass the path to the root libtorch (or PyTorch) directory +# OFF or /path/to/torch/ +set(USE_LIBTORCH OFF) + +# Whether to use the Universal Modular Accelerator Interface +set(USE_UMA OFF) + +# Set custom Alloc Alignment for device allocated memory ndarray points to +set(USE_KALLOC_ALIGNMENT 64) +set(USE_LLVM /root/BitBLAS/3rdparty/clang+llvm-10.0.1-x86_64-linux-gnu-ubuntu-16.04/bin/llvm-config) +set(USE_CUDA /usr/local/cuda) +set(USE_LLVM /root/BitBLAS/3rdparty/clang+llvm-10.0.1-x86_64-linux-gnu-ubuntu-16.04/bin/llvm-config) +set(USE_CUDA /usr/local/cuda) +set(USE_LLVM /root/BitBLAS/3rdparty/clang+llvm-10.0.1-x86_64-linux-gnu-ubuntu-16.04/bin/llvm-config) +set(USE_CUDA /usr/local/cuda) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/conftest.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..861abc14b843ab70e0168a6c77716b7eab173ffe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/conftest.py @@ -0,0 +1,108 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +import hashlib +import pytest +import sys +import os + +from pathlib import Path + +pytest_plugins = ["tvm.testing.plugin"] +IS_IN_CI = os.getenv("CI", "") == "true" +REPO_ROOT = Path(__file__).resolve().parent + + +# These are long running tests (manually curated and extracted from CI logs) +# that should be allocated to test shards in a round-robin fashion. These are +# taken from the 20 (arbitrary number) of tests as from +# https://ci.tlcpack.ai/job/tvm/job/main/2907/testReport +_slowest_tests = [ + "tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_args", + "tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_to", + "tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]", + "tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]", + "tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d", + "tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d", + "tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]", + "tests/python/frontend/tflite/test_forward.py::test_all_elemwise", + "tests/python/frontend/pytorch/test_object_detection.py::test_detection_models", + "tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]", + "tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc", + "tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py::test_conv2d_hwnc_tensorcore", + "tests/python/contrib/test_tensorrt.py::test_binary[compile]", + "tests/python/frontend/pytorch/test_forward.py::test_segmentation_models", + "tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc", + "tests/python/relay/test_py_converter.py::test_global_recursion", + "tests/python/frontend/tensorflow/test_forward.py::test_forward_ptb", + "tests/python/relay/test_op_level6.py::test_topk", + 
"tests/python/topi/python/test_topi_conv2d_winograd.py::test_conv2d_nchw", + "tests/python/relay/test_py_converter.py::test_global_recursion", +] +HARDCODED_ALLOCATIONS = {} +for idx, test in enumerate(_slowest_tests): + HARDCODED_ALLOCATIONS[test] = idx + +# These rely on running on the same node to pass successfully +FIXED_ALLOCATION_PREFIXES = { + "tests/python/testing/test_tvm_testing_features.py": 0, +} + + +def find_shard_index(nodeid: str, num_shards: int) -> int: + """ + Return the index of the shard that should run this test + """ + for prefix, target_shard_idx in FIXED_ALLOCATION_PREFIXES.items(): + if nodeid.startswith(prefix): + if target_shard_idx >= num_shards: + raise RuntimeError( + f"Cannot collect sharded tests, {nodeid} has hardcoded shard index {target_shard_idx} among only {num_shards} shards" + ) + return target_shard_idx + + if nodeid in HARDCODED_ALLOCATIONS: + hash = HARDCODED_ALLOCATIONS[nodeid] + else: + hash = hashlib.md5(nodeid.encode()) + hash = int(hash.hexdigest(), 16) + + return hash % num_shards + + +def pytest_collection_modifyitems(config, items): + if not all(k in os.environ for k in ["CI", "TVM_NUM_SHARDS", "TVM_SHARD_INDEX"]): + # Only apportion tests if in CI and in a job that is set up for it + return + + num_shards = int(os.environ["TVM_NUM_SHARDS"]) + shard_index = int(os.environ["TVM_SHARD_INDEX"]) + + print(f"Marking tests for shard {shard_index} of {num_shards}") + items_copy = list(items) + for item in items_copy: + item_shard_index = find_shard_index(item.nodeid, num_shards=num_shards) + if item_shard_index != shard_index: + items.remove(item) + + +def pytest_sessionstart(): + if IS_IN_CI: + hook_script_dir = REPO_ROOT / "tests" / "scripts" / "request_hook" + sys.path.append(str(hook_script_dir)) + import request_hook # pylint: disable=import-outside-toplevel + + request_hook.init() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.blockingconcurrentqueue.txt 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.blockingconcurrentqueue.txt new file mode 100644 index 0000000000000000000000000000000000000000..d08e53a3c518fa8b09c0f34505c3fea648cf9c64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.blockingconcurrentqueue.txt @@ -0,0 +1,26 @@ +©2015-2016 Cameron Desrochers. Distributed under the terms of the simplified +BSD license, available at the top of concurrentqueue.h. + +Uses Jeff Preshing's semaphore implementation (under the terms of its +separate zlib license, embedded below). + + +zlib license +------------ +Copyright (c) 2015 Jeff Preshing + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgement in the product documentation would be + appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.builtin_fp16.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.builtin_fp16.txt new file mode 100644 index 0000000000000000000000000000000000000000..5a79a1b9d5cb0af9a779751f07033e44ca3b14d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.builtin_fp16.txt @@ -0,0 +1,311 @@ +============================================================================== +The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: +============================================================================== + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. + +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. 
All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +============================================================================== +Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy): +============================================================================== + +The compiler_rt library is dual licensed under both the University of Illinois +"BSD-Like" license and the MIT license. As a user of this code you may choose +to use it under either license. As a contributor, you agree to allow your code +to be used under both. + +Full text of the relevant licenses is included below. + +============================================================================== + +University of Illinois/NCSA +Open Source License + +Copyright (c) 2009-2019 by the contributors listed in CREDITS.TXT + +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== + +Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cma.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cma.txt new file mode 100644 index 0000000000000000000000000000000000000000..00028209d171596486be491d2f6a44336db32f95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cma.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +COPYRIGHT (C) 2017 Institute of Electronics and Computer Science (EDI), Latvia. +AUTHOR: Rihards Novickis (rihards.novickis@edi.lv) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cnpy.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cnpy.txt new file mode 100644 index 0000000000000000000000000000000000000000..e60eadbccb3f9e19bc6cebaa4505da27dfed2ff1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cnpy.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) Carl Rogers, 2011 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.concurrentqueue.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.concurrentqueue.txt new file mode 100644 index 0000000000000000000000000000000000000000..b36f9eadc9f96666d18b1f1c6de3dde13ec40090 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.concurrentqueue.txt @@ -0,0 +1,22 @@ +Simplified BSD license: +Copyright (c) 2013-2016, Cameron Desrochers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, this list of +conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or other materials +provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cutlass.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cutlass.txt new file mode 100644 index 0000000000000000000000000000000000000000..64a49d680b1eda31469a2d2da58a757366f9f1aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cutlass.txt @@ -0,0 +1,23 @@ +Copyright (c) 2017 - 2020, NVIDIA CORPORATION. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the NVIDIA CORPORATION nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cutlass_fpA_intB_gemm.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cutlass_fpA_intB_gemm.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.cutlass_fpA_intB_gemm.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.dlpack.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.dlpack.txt new file mode 100644 index 0000000000000000000000000000000000000000..20a9c8a7b4dce845c8bfa24bf5cd8fbd6beab479 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.dlpack.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2017 by Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.l2_cache_flush.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.l2_cache_flush.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd8b243dfa02d4e7080150180520f742d2861d15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.l2_cache_flush.txt @@ -0,0 +1,218 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +--- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libbacktrace.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libbacktrace.txt new file mode 100644 index 0000000000000000000000000000000000000000..e9e256244d6965a8616b01c5f5184164dc1ed804 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libbacktrace.txt @@ -0,0 +1,29 @@ +# Copyright (C) 2012-2016 Free Software Foundation, Inc. 
+ +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: + +# (1) Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. + +# (2) Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. + +# (3) The name of the author may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libcrc.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libcrc.txt new file mode 100644 index 0000000000000000000000000000000000000000..0552660c48702f84992133abca3aab305265ce78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libcrc.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 1999-2016 Lammert Bies + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libflash_attn.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libflash_attn.txt new file mode 100644 index 0000000000000000000000000000000000000000..5860e4b33f3d9d85fc636137c559331d51783a5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.libflash_attn.txt @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.picojson.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.picojson.txt new file mode 100644 index 0000000000000000000000000000000000000000..72f3553911104d95a6e438a4040763a4a3903013 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.picojson.txt @@ -0,0 +1,25 @@ +Copyright 2009-2010 Cybozu Labs, Inc. +Copyright 2011-2014 Kazuho Oku +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.rang.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.rang.txt new file mode 100644 index 0000000000000000000000000000000000000000..cf1ab25da0349f84a3fdd40032f0ce99db813b8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.rang.txt @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.tensorrt_llm.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.tensorrt_llm.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.tensorrt_llm.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.vllm.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.vllm.txt new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/licenses/LICENSE.vllm.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/mypy.ini b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/mypy.ini new file mode 100644 index 0000000000000000000000000000000000000000..4a429b7dadadf194acd97ab515c5fdc84296d5e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/mypy.ini @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +[mypy] +ignore_missing_imports = False +show_column_numbers = True +show_error_context = True +follow_imports = skip +ignore_errors = False +strict_optional = False + +[mypy-python.tvm.auto_scheduler.*] +ignore_errors = True + +[mypy-python.tvm.runtime.*] +ignore_errors = True + +[mypy-python.tvm.tir.schedule.*] +ignore_errors = False diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/pyproject.toml b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..91740f2b4b4ab47069e572aefa5a7395bc036d3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/pyproject.toml @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+[tool.isort] +profile = "black" +src_paths = ["python", "tests/python"] + +[tool.black] +line-length = 100 +target-version = ['py36'] +include = '(\.pyi?$)' +exclude = ''' + +( + /( + \.github + | \.tvm + | \.tvm_test_data + | \.vscode + | \.venv + | 3rdparty + | build\/ + | cmake\/ + | conda\/ + | docker\/ + | docs\/ + | golang\/ + | include\/ + | jvm\/ + | licenses\/ + | nnvm\/ + | rust\/ + | src\/ + | vta\/ + | web\/ + )/ +) +''' diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/.gitignore b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..4c6fde5b68b5b2cfab2ef5a5b3b182b5a9d95e48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/.gitignore @@ -0,0 +1,4 @@ +build +dist +*.cpp +requirements/*.txt diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/gen_requirements.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/gen_requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..0c8200f60b107414aa4179a117e53934ddfd2a8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/gen_requirements.py @@ -0,0 +1,668 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +"""TVM Python requirements.txt generator. + +This script generates a set of requirements.txt files (stored in `./requirements`) that describe +TVM's Python dependencies. + +## Pieces + +TVM can be roughly broken into these named pieces along the lines of Python dependencies: + +- "core": A core piece, which is intended to be buildable with very few external dependencies. Users + can use Relay, compile models, and run autotuning with this part. +- "importer-": Model importers, which convert models defined in various other tools (i.e. + TensorFlow, PyTorch, etc) into Relay models. +- Extra features (i.e. XGBoost in AutoTVM). These enhance TVM's functionality, but aren't required + for basic operation. + +## What this tool does + +From these pieces, this tool builds: + - requirements/.txt - Python dependencies for each named piece above, `` is the same as + the quoted piece name. + - requirements/all.txt - Consolidated Python dependencies for all pieces, excluding dev below. + - requirements/dev.txt - Python dependencies needed to develop TVM, such as lint and test tools. + +The data representing each piece is contained in the two maps below. +""" + +import argparse +import collections +import os +import re +import sys +import textwrap +import typing + +RequirementsByPieceType = typing.List[typing.Tuple[str, typing.Tuple[str, typing.List[str]]]] + + +# Maps named TVM piece (see description above) to a list of names of Python packages. Please use +# alphabetical order for each package list, and do not add version constraints here! +REQUIREMENTS_BY_PIECE: RequirementsByPieceType = [ + # Base requirements needed to install tvm. 
+ ( + "core", + ( + "Base requirements needed to install tvm", + [ + "attrs", + "cloudpickle", + "decorator", + "ml_dtypes", + "numpy", + "psutil", + "scipy", + "tornado", + "typing_extensions", + ], + ), + ), + # Provide support for Arm(R) Ethos(TM)-U NPU. + ( + "ethosu", + ( + "Requirements for using Arm(R) Ethos(TM)-U NPU", + [ + "ethos-u-vela", + ], + ), + ), + # Relay frontends. + ( + "importer-caffe", + ( + "Requirements for the Caffe importer", + [ + "numpy", + "protobuf", + "scikit-image", + "six", + ], + ), + ), + ( + "importer-caffe2", + ( + "Requirements for the Caffe2 importer", + [ + "future", # Hidden dependency of torch. + "torch", + ], + ), + ), + ("importer-coreml", ("Requirements for the CoreML importer", ["coremltools"])), + ("importer-darknet", ("Requirements for the DarkNet importer", ["opencv-python"])), + ( + "importer-keras", + ("Requirements for the Keras importer", ["tensorflow", "tensorflow-estimator"]), + ), + ( + "importer-onnx", + ( + "Requirements for the ONNX importer", + [ + "future", # Hidden dependency of torch. + "onnx", + "onnxoptimizer", + "onnxruntime", + "torch", + "torchvision", + ], + ), + ), + ( + "importer-paddle", + ("Requirements for the PaddlePaddle importer", ["paddlepaddle"]), + ), + ( + "importer-pytorch", + ( + "Requirements for the PyTorch importer", + [ + "future", # Hidden dependency of torch. + "torch", + "torchvision", + ], + ), + ), + ( + "importer-tensorflow", + ("Requirements for the TensorFlow importer", ["tensorflow", "tensorflow-estimator"]), + ), + ( + "importer-tflite", + ("Requirements for the TFLite importer", ["tensorflow", "tensorflow-estimator", "tflite"]), + ), + ( + "tvmc", + ( + "Requirements for the tvmc command-line tool", + [ + "ethos-u-vela", + "future", # Hidden dependency of torch. 
+ "onnx", + "onnxoptimizer", + "onnxruntime", + "paddlepaddle", + "tensorflow", + "tflite", + "torch", + "torchvision", + "xgboost", + ], + ), + ), + # Vitis AI requirements + ( + "vitis-ai", + ( + "Requirements for the Vitis AI codegen", + [ + "h5py", + "progressbar", + ], + ), + ), + # XGBoost, useful for autotuning on some targets. + ( + "xgboost", + ( + "Requirements for XGBoost autotuning", + [ + "future", # Hidden dependency of torch. + "torch", + "xgboost", + ], + ), + ), + # Development requirements + ( + "dev", + ( + "Requirements to develop TVM -- lint, docs, testing, etc.", + [ + "astroid", # pylint requirement, listed so a hard constraint can be included. + "autodocsumm", + "black", + "commonmark", + "cpplint", + "docutils", + "image", + "matplotlib", + "pillow", + "pylint", + "sphinx", + "sphinx_autodoc_annotation", + "sphinx_gallery", + "sphinx_rtd_theme", + "types-psutil", + ], + ), + ), +] + +ConstraintsType = typing.List[typing.Tuple[str, typing.Union[None, str]]] + +# Maps a named Python package (which should appear in REQUIREMENTS_BY_PIECE above) to a +# semver or pip version constraint. Semver constraints are translated into requirements.txt-friendly +# constraints. +# +# These constraints serve only to record technical reasons why a particular version can't be used. +# They are the default install_requires used in setup.py. These can be further narrowed to restrict +# dependencies to those tested or used in CI; however, that process is not done here. +# +# Policy for constraints listed here: +# 1. Each package specified in REQUIREMENTS_BY_PIECE must be included here. +# 2. If TVM will functionally break against an old version of a dependency, specify a >= relation +# here. Include a comment linking to context or explaining why the constraint is in place. +CONSTRAINTS = [ + ("astroid", None), + ("attrs", None), + ("autodocsumm", None), + ("black", "==20.8b1"), + ("cloudpickle", None), + ("commonmark", ">=0.7.3"), # From PR #213. 
+ ("coremltools", None), + ("cpplint", None), + ("decorator", None), + ( + "docutils", + "<0.17", + ), # Work around https://github.com/readthedocs/sphinx_rtd_theme/issues/1115 + ("ethos-u-vela", "==3.8.0"), + ("future", None), + ("h5py", "==2.10.0"), + ("image", None), + ("matplotlib", None), + ("numpy", None), + ("onnx", None), + ("onnxoptimizer", None), + ("onnxruntime", None), + ("opencv-python", None), + ("paddlepaddle", None), + ("pillow", None), + ("progressbar", None), + ("protobuf", None), + ("psutil", None), + ("pylint", None), + ("scikit-image", None), + ("scipy", None), + ("six", None), + ("sphinx", None), + ("sphinx_autodoc_annotation", None), + ("sphinx_gallery", None), + ("sphinx_rtd_theme", None), + ("tensorflow", None), + ("tensorflow-estimator", None), + ("tflite", None), + ("torch", None), + ("torchvision", None), + ("tornado", None), + ("typing_extensions", None), + ("xgboost", ">=1.1.0"), # From PR #4953 & Issue #12009 +] + +################################################################################ +# End of configuration options. +################################################################################ + + +# Required keys in REQUIREMENTS_BY_PIECE. +REQUIRED_PIECES: typing.List[str] = ["core", "dev"] + +# Regex to validates piece names. +PIECE_REGEX: typing.Pattern = re.compile(r"^[a-z0-9][a-z0-9-]*", re.IGNORECASE) + +# Regex to match a constraint specification. Multiple constraints are not supported. +CONSTRAINT_REGEX: typing.Pattern = re.compile(r"(?:\^|\<|(?:~=)|(?:<=)|(?:==)|(?:>=)|\>)[^<>=\^,]+") + +# Regex for parsing semantic versions. 
See +# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string +SEMVER_REGEX: typing.Pattern = re.compile( + r"^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$" +) + + +def validate_requirements_by_piece() -> typing.List[str]: + """Validate REQUIREMENTS_BY_PIECE, returning a list of problems. + + Returns + ------- + list[str] : + A list of strings, each one describing a distinct problem with REQUIREMENTS_BY_PIECE. + """ + problems = [] + + unseen_required_pieces = set(REQUIRED_PIECES) + seen_pieces = set() + + # Ensure that core is listed first and dev is listed last. + saw_core = False + saw_dev = False + + if not isinstance(REQUIREMENTS_BY_PIECE, (list, tuple)): + problems.append(f"must be list or tuple, see {REQUIREMENTS_BY_PIECE!r}") + return problems + + for piece, value in REQUIREMENTS_BY_PIECE: + if not isinstance(piece, str): + problems.append(f"piece {piece!r}: must be str") + continue + + if piece in unseen_required_pieces: + unseen_required_pieces.remove(piece) + + piece_lower = piece.lower() + if piece_lower in seen_pieces: + problems.append(f"piece {piece}: listed twice") + + seen_pieces.add(piece_lower) + + if not saw_core and piece != "core": + problems.append(f'piece {piece}: must list after "core" (core must be first)') + elif piece == "core": + saw_core = True + + if saw_dev: + problems.append(f'piece {piece}: must list before "dev" (dev must be last)') + elif piece == "dev": + saw_dev = True + + if not isinstance(value, (tuple, list)) or len(value) != 2: + problems.append( + f'piece {piece}: should be formatted like ("{piece}", ("", ["dep1", "dep2", ...])). 
got: {value!r}' + ) + continue + + description, deps = value + + if not isinstance(description, str): + problems.append(f"piece {piece}: description should be a string, got {description!r}") + + if not isinstance(deps, (list, tuple)) or any(not isinstance(d, str) for d in deps): + problems.append(f"piece {piece}: deps should be a list of strings, got {deps!r}") + continue + + if list(sorted(deps)) != list(deps): + problems.append( + f"piece {piece}: deps must be sorted. Correct order:\n {list(sorted(deps))!r}" + ) + + piece_deps = set() + for d in deps: + if CONSTRAINT_REGEX.search(d): + problems.append( + f"piece {piece}: dependency {d} should not specify a version. " + "Add it to CONSTRAINTS instead." + ) + + if d.lower() in piece_deps: + problems.append(f"piece {piece}: dependency {d} listed twice") + + piece_deps.add(d.lower()) + + extras_pieces = [ + k for (k, _) in REQUIREMENTS_BY_PIECE if k not in ("dev", "core") if isinstance(k, str) + ] + sorted_extras_pieces = list(sorted(extras_pieces)) + if sorted_extras_pieces != list(extras_pieces): + problems.append( + 'pieces other than "core" and "dev" must appear in alphabetical order: ' + f"{sorted_extras_pieces}" + ) + + return problems + + +def parse_semver( + package: str, constraint: str, problems: typing.List[str] +) -> typing.Tuple[typing.List[str], int, int]: + """Parse a semantic versioning constraint of the form "^X.[.Y[.Z[...]]]]" + + Parameters + ---------- + package : str + Name of the package specifying this constraint, for reporting problems. + constraint : str + The semver constraint. Must start with "^" + problems : List[str] + A list of strings describing problems that have occurred validating the configuration. + Problems encountered while validating constraint are appended to this list. + + Returns + ------- + tuple[list[str], int, int] : + A 3-tuple. The first element is a list containing an entry for each component in the + semver string (components separated by "."). 
The second element is the index of the + component in the list which must not change to meet the semver constraint. The third element + is an integer, the numeric value of the changing component (this can be non-trivial when + the patch is the changing part but pre-, post-release, or build metadta. + + See "Caret requirements" at https://python-poetry.org/docs/versions/. + """ + m = SEMVER_REGEX.match(constraint[1:]) + if not m: + problems.append(f"{package}: invalid semver constraint {constraint}") + return [], 0, 0 + + min_ver_parts = [ + m.group("major"), + m.group("minor"), + m.group("patch") + + (f"-{m.group('prerelease')}" if m.group("prerelease") else "") + + (f"+{m.group('buildmetadata')}" if m.group("buildmetadata") else ""), + ] + + # Major/minor version handling is simple + for i, p in enumerate(min_ver_parts[:2]): + x = int(p.strip()) + if x: + return min_ver_parts, i, x + + # For patch version, consult only the numeric patch + if m.group("patch"): + patch_int = int(m.group("patch")) + if patch_int or min_ver_parts[2] != m.group("patch"): + return min_ver_parts, 2, patch_int + + # All 0's + return min_ver_parts, 0, 0 + + +def validate_constraints() -> typing.List[str]: + """Validate CONSTRAINTS, returning a list of problems found. + + Returns + ------- + list[str] : + A list of strings, each one describing a distinct problem found in CONSTRAINTS. + """ + problems = [] + + if not isinstance(CONSTRAINTS, (list, tuple)): + problems.append(f"must be list or tuple, see: {CONSTRAINTS!r}") + + seen_packages = set() + all_deps = set() + for _, (_, deps) in REQUIREMENTS_BY_PIECE: + for d in deps: + all_deps.add(d.lower()) + + for package, constraint in CONSTRAINTS: + if package in seen_packages: + problems.append(f"{package}: specified twice") + seen_packages.add(package) + + if package.lower() not in all_deps: + problems.append(f"{package}: not specified in REQUIREMENTS_BY_PIECE") + + if constraint is None: # None is just a placeholder that allows for comments. 
+ continue + + if not CONSTRAINT_REGEX.match(constraint): + problems.append( + f'{package}: constraint "{constraint}" does not look like a valid constraint' + ) + + if constraint.startswith("^"): + parse_semver(package, constraint, problems) + + all_constrained_packages = [p for (p, _) in CONSTRAINTS] + sorted_constrained_packages = list(sorted(all_constrained_packages)) + if sorted_constrained_packages != all_constrained_packages: + problems.append( + "CONSTRAINTS entries should be in this sorted order: " f"{sorted_constrained_packages}" + ) + + return problems + + +class ValidationError(Exception): + """Raised when a validation error occurs.""" + + @staticmethod + def format_problems(config: str, problems: typing.List[str]) -> str: + """Format a list of problems with a global config variable into human-readable output. + + Parameters + ---------- + config : str + Name of the global configuration variable of concern. Prepended to the output. + problems: list[str] + A list of strings, each one a distinct problem with that config variable. + + Returns + ------- + str : + A human-readable string suitable for console, listing the problems as bullet points. + """ + formatted = [] + for p in problems: + assert isinstance(p, str), f"problems element not a str: {p}" + formatted.append( + "\n".join( + textwrap.wrap( + f"{config}: {p}", width=80, initial_indent=" * ", subsequent_indent=" " + ) + ) + ) + + return "\n".join(formatted) + + def __init__(self, config: str, problems: typing.List[str]): + """Describes an error that occurs validating one of the global config variables. + + Parameters + ---------- + config : str + Name of the global configuration variable of concern. Prepended to the output. + problems: list[str] + A list of strings, each one a distinct problem with that config variable. 
+ """ + super(ValidationError, self).__init__(self.format_problems(config, problems)) + self.problems = problems + + +def validate_or_raise(): + problems = validate_requirements_by_piece() + if problems: + raise ValidationError("REQUIREMENTS_BY_PIECE", problems) + + problems = validate_constraints() + if problems: + raise ValidationError("CONSTRAINTS", problems) + + +def semver_to_requirements(dep: str, constraint: str, joined_deps: typing.List[str]): + """Convert a SemVer-style constraint to a setuptools-compatible constraint. + + Parameters + ---------- + dep : str + Name of the PyPI package to depend on. + constraint : str + The SemVer constraint, of the form "^" + joined_deps : list[str] + A list of strings, each a setuptools-compatible constraint which could be written to + a line in requirements.txt. The converted constraint is appended to this list. + """ + problems: typing.List[str] = [] + min_ver_parts, fixed_index, fixed_part = parse_semver(dep, constraint, problems) + text_problems = "\n" + "\n".join(f" * {p}" for p in problems) + assert ( + not problems + ), f"should not happen: validated semver {constraint} parses with problems:{text_problems}" + + max_ver_parts = ( + min_ver_parts[:fixed_index] + + [str(fixed_part + 1)] + + ["0" for _ in min_ver_parts[fixed_index + 1 :]] + ) + joined_deps.append(f'{dep}>={".".join(min_ver_parts)},<{".".join(max_ver_parts)}') + + +def join_requirements() -> typing.Dict[str, typing.Tuple[str, typing.List[str]]]: + """Validate, then join REQUIRMENTS_BY_PIECE against CONSTRAINTS and return the result. + + Returns + ------- + An OrderedDict containing REQUIREMENTS_BY_PIECE, except any dependency mentioned in CONSTRAINTS + is replaced by a setuptools-compatible constraint. 
+ """ + validate_or_raise() + + constraints_map = collections.OrderedDict([(p.lower(), c) for (p, c) in CONSTRAINTS]) + + to_return = collections.OrderedDict() + all_deps = set() + for piece, (description, deps) in REQUIREMENTS_BY_PIECE: + joined_deps = [] + for d in deps: + constraint = constraints_map.get(d.lower()) + if constraint is None: + joined_deps.append(d) + continue + + if constraint[0] == "^": + semver_to_requirements(d, constraint, joined_deps) + else: + joined_deps.append(f"{d}{constraint}") + + if piece != "dev": + all_deps.update(joined_deps) + + to_return[piece] = (description, joined_deps) + + to_return["all-prod"] = ( + "Combined dependencies for all TVM pieces, excluding dev", + list(sorted(all_deps)), + ) + + return to_return + + +def join_and_write_requirements(args: argparse.Namespace): + try: + joined_deps = join_requirements() + except ValidationError as e: + print(f"ERROR: invalid requirements configuration in {__file__}:", file=sys.stderr) + print(str(e), file=sys.stderr) + sys.exit(2) + + if args.lint: + sys.exit(0) + + output_dir = os.path.join(os.path.dirname(__file__), "requirements") + if not os.path.exists(output_dir): + os.makedirs(output_dir) + elif not os.path.isdir(output_dir): + print( + f"ERROR: output directory {output_dir} exists but is not a dir. 
Delete it", + file=sys.stderr, + ) + sys.exit(2) + + for piece, (description, deps) in joined_deps.items(): + with open(os.path.join(output_dir, f"{piece}.txt"), "w") as f: + f.write( + f"# AUTOGENERATED by python/gen_requirements.py{os.linesep}" + f"#{os.linesep}" + f"# {description}{os.linesep}" + ) + for d in deps: + f.write(f"{d}{os.linesep}") + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + parser.add_argument( + "--lint", action="store_true", help="Just lint dependencies, don't generate anything" + ) + return parser.parse_args() + + +def main(): + args = parse_args() + join_and_write_requirements(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/setup.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..594ab5fc8defe2b4bcaa8b7da4b961fed0b994d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/setup.py @@ -0,0 +1,295 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name, exec-used +"""Setup TVM package.""" +import os +import pathlib +import shutil +import sys +import sysconfig + +from setuptools import find_packages +from setuptools.dist import Distribution + +# need to use distutils.core for correct placement of cython dll +if "--inplace" in sys.argv: + from distutils.core import setup + from distutils.extension import Extension +else: + from setuptools import setup + from setuptools.extension import Extension + +CURRENT_DIR = os.path.dirname(__file__) +FFI_MODE = os.environ.get("TVM_FFI", "auto") +CONDA_BUILD = os.getenv("CONDA_BUILD") is not None +INPLACE_BUILD = "--inplace" in sys.argv + + +def get_lib_path(): + """Get library path, name and version""" + # We can not import `libinfo.py` in setup.py directly since __init__.py + # Will be invoked which introduces dependencies + libinfo_py = os.path.join(CURRENT_DIR, "./tvm/_ffi/libinfo.py") + libinfo = {"__file__": libinfo_py} + exec(compile(open(libinfo_py, "rb").read(), libinfo_py, "exec"), libinfo, libinfo) + version = libinfo["__version__"] + if not CONDA_BUILD and not INPLACE_BUILD: + lib_path = libinfo["find_lib_path"]() + libs = [lib_path[0]] + if "runtime" not in libs[0]: + for name in lib_path[1:]: + if "runtime" in name: + libs.append(name) + break + + # Add byoc shared libraries, if present + for name in lib_path: + if "3rdparty" in name: + libs.append(name) + + # Add standalone_crt, if present + for name in lib_path: + candidate_path = os.path.join(os.path.dirname(name), "standalone_crt") + if os.path.isdir(candidate_path): + libs.append(candidate_path) + break + + # Add microTVM template projects + for name in lib_path: + candidate_path = os.path.join(os.path.dirname(name), "microtvm_template_projects") + if os.path.isdir(candidate_path): + libs.append(candidate_path) + break + + # Add tvmc configuration json files + for name in lib_path: + candidate_path = os.path.abspath(os.path.join(os.path.dirname(name), "..", "configs")) + if 
os.path.isdir(candidate_path): + libs.append(candidate_path) + break + + for dir in [ + "3rdparty", + "jvm", + "web", + "rust", + "golang", + "include", + "src", + "cmake", + "CMakeLists.txt", + ]: + for name in lib_path: + candidate_path = os.path.abspath(os.path.join(os.path.dirname(name), "..", dir)) + if os.path.exists(candidate_path): + libs.append(candidate_path) + if dir == "3rdparty": + # remove large files + _remove_path(os.path.join(candidate_path, "cutlass", "docs")) + _remove_path(os.path.join(candidate_path, "cutlass", "media")) + _remove_path( + os.path.join(candidate_path, "cutlass_fpA_intB_gemm", "cutlass", "docs") + ) + _remove_path( + os.path.join( + candidate_path, "cutlass_fpA_intB_gemm", "cutlass", "media" + ) + ) + _remove_path( + os.path.join(candidate_path, "libflash_attn", "cutlass", "docs") + ) + _remove_path( + os.path.join(candidate_path, "libflash_attn", "cutlass", "media") + ) + break + else: + libs = None + + return libs, version + + +def git_describe_version(original_version): + """Get git describe version.""" + ver_py = os.path.join(CURRENT_DIR, "..", "version.py") + libver = {"__file__": ver_py} + exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver) + _, gd_version = libver["git_describe_version"]() + if gd_version != original_version and "--inplace" not in sys.argv: + print("Use git describe based version %s" % gd_version) + return gd_version + + +def _remove_path(path): + if os.path.exists(path): + if os.path.isfile(path): + os.remove(path) + elif os.path.isdir(path): + shutil.rmtree(path) + + +LIB_LIST, __version__ = get_lib_path() +__version__ = git_describe_version(__version__) + + +def config_cython(): + """Try to configure cython and return cython configuration""" + if FFI_MODE not in ("cython"): + if os.name == "nt" and not CONDA_BUILD: + print("WARNING: Cython is not supported on Windows, will compile without cython module") + return [] + sys_cflags = sysconfig.get_config_var("CFLAGS") + if sys_cflags 
and "i386" in sys_cflags and "x86_64" in sys_cflags: + print("WARNING: Cython library may not be compiled correctly with both i386 and x64") + return [] + try: + from Cython.Build import cythonize + + # from setuptools.extension import Extension + if sys.version_info >= (3, 0): + subdir = "_cy3" + else: + subdir = "_cy2" + ret = [] + path = "tvm/_ffi/_cython" + extra_compile_args = ["-std=c++17", "-DDMLC_USE_LOGGING_LIBRARY="] + if os.name == "nt": + library_dirs = ["tvm", "../build/Release", "../build"] + libraries = ["tvm"] + extra_compile_args = [ + "/std:c++17", + "/D DMLC_USE_LOGGING_LIBRARY=", + ] + # library is available via conda env. + if CONDA_BUILD: + library_dirs = [os.environ["LIBRARY_LIB"]] + else: + library_dirs = None + libraries = None + + for fn in os.listdir(path): + if not fn.endswith(".pyx"): + continue + ret.append( + Extension( + "tvm._ffi.%s.%s" % (subdir, fn[:-4]), + ["tvm/_ffi/_cython/%s" % fn], + include_dirs=[ + "../include/", + "../3rdparty/dmlc-core/include", + "../3rdparty/dlpack/include", + ], + extra_compile_args=extra_compile_args, + library_dirs=library_dirs, + libraries=libraries, + language="c++", + ) + ) + return cythonize(ret, compiler_directives={"language_level": 3}) + except ImportError as error: + if FFI_MODE == "cython": + raise error + print("WARNING: Cython is not installed, will compile without cython module") + return [] + + +class BinaryDistribution(Distribution): + def has_ext_modules(self): + return True + + def is_pure(self): + return False + + +setup_kwargs = {} +if not CONDA_BUILD and not INPLACE_BUILD: + with open("MANIFEST.in", "w") as fo: + for path in LIB_LIST: + if os.path.isfile(path): + shutil.copy(path, os.path.join(CURRENT_DIR, "tvm")) + _, libname = os.path.split(path) + fo.write(f"include tvm/{libname}\n") + + if os.path.isdir(path): + _, libname = os.path.split(path) + shutil.copytree(path, os.path.join(CURRENT_DIR, "tvm", libname)) + fo.write(f"recursive-include tvm/{libname} *\n") + + setup_kwargs 
= {"include_package_data": True} + + +def get_package_data_files(): + # Relay standard libraries + return ["relay/std/prelude.rly", "relay/std/core.rly"] + + +def long_description_contents(): + with open(pathlib.Path(CURRENT_DIR).resolve().parent / "README.md", encoding="utf-8") as readme: + description = readme.read() + + return description + + +# Temporarily add this directory to the path so we can import the requirements generator +# tool. +sys.path.insert(0, os.path.dirname(__file__)) +import gen_requirements + +sys.path.pop(0) + +requirements = gen_requirements.join_requirements() +extras_require = { + piece: deps for piece, (_, deps) in requirements.items() if piece not in ("all", "core") +} + +setup( + name="tvm", + version=__version__, + description="TVM: An End to End Tensor IR/DSL Stack for Deep Learning Systems", + long_description=long_description_contents(), + long_description_content_type="text/markdown", + url="https://tvm.apache.org/", + download_url="https://github.com/apache/tvm/tags", + author="Apache TVM", + license="Apache", + # See https://pypi.org/classifiers/ + classifiers=[ + "License :: OSI Approved :: Apache Software License", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + ], + keywords="machine learning", + zip_safe=False, + entry_points={"console_scripts": ["tvmc = tvm.driver.tvmc.main:main"]}, + install_requires=requirements["core"][1], + extras_require=extras_require, + packages=find_packages(), + package_dir={"tvm": "tvm"}, + package_data={"tvm": get_package_data_files()}, + distclass=BinaryDistribution, + ext_modules=config_cython(), + **setup_kwargs, +) + + +if not CONDA_BUILD and not INPLACE_BUILD: + # Wheel cleanup + os.remove("MANIFEST.in") + for path in LIB_LIST: + _, libname = os.path.split(path) + _remove_path(f"tvm/{libname}") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__init__.py 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f5970ec1367241056bbb10f068857493f9316bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__init__.py @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=redefined-builtin, wildcard-import +"""TVM: Open Deep Learning Compiler Stack.""" +import multiprocessing +import sys +import os +import traceback + +# top-level alias +# tvm._ffi +from ._ffi.base import TVMError, __version__, _RUNTIME_ONLY + +from ._ffi.runtime_ctypes import DataTypeCode, DataType +from ._ffi import register_object, register_func, register_extension, get_global_func + +# top-level alias +# tvm.runtime +from .runtime.object import Object +from .runtime.ndarray import device, cpu, cuda, gpu, opencl, cl, vulkan, metal, mtl +from .runtime.ndarray import vpi, rocm, ext_dev, hexagon +from .runtime import ndarray as nd + +# tvm.error +from . 
import error + +# tvm.ir +from .ir import IRModule +from .ir import transform +from .ir import instrument +from .ir import container +from .ir import PoolInfo +from .ir import WorkspacePoolInfo +from .ir import ConstantPoolInfo +from .ir import PoolInfoProperties +from .ir import WorkspaceMemoryPools +from .ir import ConstantMemoryPools +from . import ir + +# tvm.tir +from . import tir + +# tvm.target +from . import target + +# tvm.te +from . import te + +# tvm.driver +from .driver import build, lower + +# tvm.parser +from . import parser + +# others +from . import arith + +# support infra +from . import support + +# Contrib initializers +from .contrib import rocm as _rocm, nvcc as _nvcc, hipcc as _hipcc, sdaccel as _sdaccel + +# Relay and Relax contain modules that are only available in compiler package +# Do not import them if TVM is built with runtime only +if not _RUNTIME_ONLY: + from . import relay + from . import relax + +if not _RUNTIME_ONLY and support.libinfo().get("USE_MICRO", "OFF") == "ON": + from . import micro + +# NOTE: This file should be python2 compatible so we can +# raise proper error message when user run the package using +# an older version of the python + + +def _should_print_backtrace(): + in_pytest = "PYTEST_CURRENT_TEST" in os.environ + tvm_backtrace = os.environ.get("TVM_BACKTRACE", "0") + + try: + tvm_backtrace = bool(int(tvm_backtrace)) + except ValueError: + raise ValueError( + "invalid value for TVM_BACKTRACE {}, please set to 0 or 1.".format(tvm_backtrace) + ) + + return in_pytest or tvm_backtrace + + +def tvm_wrap_excepthook(exception_hook): + """Wrap given excepthook with TVM additional work.""" + + def wrapper(exctype, value, trbk): + """Clean subprocesses when TVM is interrupted.""" + if exctype is error.DiagnosticError and not _should_print_backtrace(): + # TODO(@jroesch): consider moving to C++? 
+ print("note: run with `TVM_BACKTRACE=1` environment variable to display a backtrace.") + else: + exception_hook(exctype, value, trbk) + + if hasattr(multiprocessing, "active_children"): + # pylint: disable=not-callable + for p in multiprocessing.active_children(): + p.terminate() + + return wrapper + + +sys.excepthook = tvm_wrap_excepthook(sys.excepthook) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3d38fc04bf2943480e6486e260f34fda919a40d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdd6cc6882f9fd5c1af90622250148752e72a3ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/error.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..854c79eebc9877788ec756a779112829cbc70aa3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/error.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/error.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/error.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..11ffbdc412c3329a25ab7aaa72e01dbf86547366 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/error.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/generic.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36103ba9587b11bfb69e39288e461f78a601ab6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/generic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/parser.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c84badc6c06050d4557f6b6f25f3a3c7a321ac1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/parser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/parser.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/parser.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ad30aa56e24f0e9d04dadf527b5882afcc0aa54 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/parser.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/support.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aed91d694a03117a462256c9697d8c26a8b69755 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/support.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/support.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdb02d7bcef8cac3a5def0004fa750b0d615bbae Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/__pycache__/support.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..684ddafd27ece985fa42daf171e46a18468a2b55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__init__.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""C interfacing code. + +This namespace contains everything that interacts with C code. 
+Most TVM C related object are ctypes compatible, which means +they contains a handle field that is ctypes.c_void_p and can +be used via ctypes function calls. + +Some performance critical functions are implemented by cython +and have a ctypes fallback implementation. +""" +from . import _pyversion +from .base import register_error +from .registry import register_object, register_func, register_extension +from .registry import _init_api, get_global_func, get_object_type_index diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de9c349092d2918a90c7a01d6355e03d940844d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78248cacf761d5ee9b9c8d330b5a2374055583ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/_pyversion.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/_pyversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d787ec7a8ec901c2f0578392c4a6bd35959cc035 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/_pyversion.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/_pyversion.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/_pyversion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eeafb2ba779720aef5bd6d7bf9ff3ec38a43c0d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/_pyversion.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4a68157cf7a01076162e50df2392a2d80e52adb Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/base.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/base.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdef929ea6a8bd667a31ae1e3460bd168b1718f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/base.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/libinfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/libinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..259b0648c07ed907802a7d229f797eb2af0b890b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/libinfo.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/libinfo.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/libinfo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..687cf1399ccc7bc5dfe84ecb7a42913086cb86cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/libinfo.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40b76dd7bdb554fe7b6d5e1b24d5b0b17c4279e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/registry.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/registry.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca90495e3dbde377c7868839c824a30a862a2663 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/registry.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/runtime_ctypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/runtime_ctypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa871b5ccd5079f2f55e1b7a8daf37ab748b3800 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/runtime_ctypes.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/runtime_ctypes.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/runtime_ctypes.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95d2a5c638cdac0ab17f465bdbae4cbc15c3372e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/__pycache__/runtime_ctypes.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2851f4b27f3bf26146cb12a66d7d788a6049a1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""ctypes specific implementation of FFI""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c56cf9bd8c4491e3fe5428ebeaf38eeee50caaa2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a73be69d3fd762e499fb900690863077b67ed5cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/ndarray.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/ndarray.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ba93782779212dddb908acb4f9448c78ecace3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/ndarray.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/ndarray.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/ndarray.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9368dfa04653dec2524ebbee4b4686a6073c5c85 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/ndarray.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/object.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/object.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9766ee20d5479e5b066894042a31e8ffb9e53fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/object.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/object.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/object.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ba15b5b0abedbf4549cf08b928c48159ea24870 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/object.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/packed_func.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/packed_func.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7108d129208c3f8e59c4b2b3f421c599288387a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/packed_func.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/packed_func.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/packed_func.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..10c6a3c7b244bd9e01369158c141bee19427d856 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/packed_func.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..362c309137987d36e1ac5bc7113c52342e584c21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/types.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/types.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0e5e8313ac6f8ba9079fb73882b5fc9df81ea51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/__pycache__/types.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/ndarray.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/ndarray.py new file mode 100644 index 0000000000000000000000000000000000000000..fc8768448dde4187c3e085f3983a1fcde505fecb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/ndarray.py @@ -0,0 +1,151 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Runtime NDArray api""" +import ctypes +from ..base import _LIB, check_call, c_str +from ..runtime_ctypes import TVMArrayHandle +from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle + + +TVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p) +_c_str_dltensor = c_str("dltensor") +_c_str_used_dltensor = c_str("used_dltensor") + + +# used for PyCapsule manipulation +if hasattr(ctypes, "pythonapi"): + ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p + ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p + ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object + + +def _from_dlpack(dltensor): + dltensor = ctypes.py_object(dltensor) + if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor): + ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor) + # enforce type to make sure it works for all ctypes + ptr = ctypes.cast(ptr, ctypes.c_void_p) + handle = TVMArrayHandle() + check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle))) + ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor) + ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0)) + return _make_array(handle, False, False) + raise ValueError("Expect a dltensor field, PyCapsule can only be consumed once") + + +def _dlpack_deleter(pycapsule): + pycapsule = ctypes.cast(pycapsule, 
ctypes.py_object) + if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor): + ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor) + # enforce type to make sure it works for all ctypes + ptr = ctypes.cast(ptr, ctypes.c_void_p) + _LIB.TVMDLManagedTensorCallDeleter(ptr) + ctypes.pythonapi.PyCapsule_SetDestructor(pycapsule, None) + + +_c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter) + + +class NDArrayBase(object): + """A simple Device/CPU Array object in runtime.""" + + __slots__ = ["handle", "is_view"] + # pylint: disable=no-member + def __init__(self, handle, is_view=False): + """Initialize the function with handle + + Parameters + ---------- + handle : TVMArrayHandle + the handle to the underlying C++ TVMArray + """ + self.handle = handle + self.is_view = is_view + + def __del__(self): + if not self.is_view and _LIB: + check_call(_LIB.TVMArrayFree(self.handle)) + + @property + def _tvm_handle(self): + return ctypes.cast(self.handle, ctypes.c_void_p).value + + def _copyto(self, target_nd): + """Internal function that implements copy to target ndarray.""" + check_call(_LIB.TVMArrayCopyFromTo(self.handle, target_nd.handle, None)) + return target_nd + + @property + def shape(self): + """Shape of this array""" + return tuple(self.handle.contents.shape[i] for i in range(self.handle.contents.ndim)) + + def to_dlpack(self): + """Produce an array from a DLPack Tensor without copying memory + + Returns + ------- + dlpack : DLPack tensor view of the array data + """ + handle = ctypes.c_void_p() + check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle))) + return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter) + + +def _make_array(handle, is_view, is_container): + global _TVM_ND_CLS + handle = ctypes.cast(handle, TVMArrayHandle) + if is_container: + tindex = ctypes.c_uint() + check_call(_LIB.TVMArrayGetTypeIndex(handle, ctypes.byref(tindex))) + cls = _TVM_ND_CLS.get(tindex.value, _CLASS_NDARRAY) + 
else: + cls = _CLASS_NDARRAY + + ret = cls.__new__(cls) + ret.handle = handle + ret.is_view = is_view + return ret + + +_TVM_COMPATS = () + + +def _reg_extension(cls, fcreate): + global _TVM_COMPATS + _TVM_COMPATS += (cls,) + if fcreate: + fret = lambda x: fcreate(_return_handle(x)) + RETURN_SWITCH[cls._tvm_tcode] = fret + C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode) + + +_TVM_ND_CLS = {} + + +def _register_ndarray(index, cls): + global _TVM_ND_CLS + _TVM_ND_CLS[index] = cls + + +_CLASS_NDARRAY = None + + +def _set_class_ndarray(cls): + global _CLASS_NDARRAY + _CLASS_NDARRAY = cls diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/object.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/object.py new file mode 100644 index 0000000000000000000000000000000000000000..520e0e42ebbef38045ec3606499ff8c3319ea874 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/object.py @@ -0,0 +1,167 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name +"""Runtime Object api""" +import ctypes +from ..base import _LIB, check_call +from .types import ArgTypeCode, RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func +from .ndarray import _register_ndarray, NDArrayBase + + +ObjectHandle = ctypes.c_void_p +__init_by_constructor__ = None + +"""Maps object type index to its constructor""" +OBJECT_TYPE = {} + +"""Maps object type to its type index""" +OBJECT_INDEX = {} + +_CLASS_OBJECT = None + + +def _set_class_object(object_class): + global _CLASS_OBJECT + _CLASS_OBJECT = object_class + + +def _register_object(index, cls): + """register object class""" + if issubclass(cls, NDArrayBase): + _register_ndarray(index, cls) + return + OBJECT_TYPE[index] = cls + OBJECT_INDEX[cls] = index + + +def _get_object_type_index(cls): + """get the type index of object class""" + return OBJECT_INDEX.get(cls) + + +def _return_object(x): + handle = x.v_handle + if not isinstance(handle, ObjectHandle): + handle = ObjectHandle(handle) + tindex = ctypes.c_uint() + check_call(_LIB.TVMObjectGetTypeIndex(handle, ctypes.byref(tindex))) + cls = OBJECT_TYPE.get(tindex.value, _CLASS_OBJECT) + if issubclass(cls, PyNativeObject): + obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT) + obj.handle = handle + return cls.__from_tvm_object__(cls, obj) + # Avoid calling __init__ of cls, instead directly call __new__ + # This allows child class to implement their own __init__ + obj = cls.__new__(cls) + obj.handle = handle + return obj + + +RETURN_SWITCH[ArgTypeCode.OBJECT_HANDLE] = _return_object +C_TO_PY_ARG_SWITCH[ArgTypeCode.OBJECT_HANDLE] = _wrap_arg_func( + _return_object, ArgTypeCode.OBJECT_HANDLE +) + +C_TO_PY_ARG_SWITCH[ArgTypeCode.OBJECT_RVALUE_REF_ARG] = _wrap_arg_func( + _return_object, ArgTypeCode.OBJECT_RVALUE_REF_ARG +) + + +class PyNativeObject: + """Base class of all TVM objects that also subclass python's builtin types.""" + + __slots__ = [] + + def __init_tvm_object_by_constructor__(self, fconstructor, *args): + 
"""Initialize the internal tvm_object by calling constructor function. + + Parameters + ---------- + fconstructor : Function + Constructor function. + + args: list of objects + The arguments to the constructor + + Note + ---- + We have a special calling convention to call constructor functions. + So the return object is directly set into the object + """ + # pylint: disable=assigning-non-slot + obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT) + obj.__init_handle_by_constructor__(fconstructor, *args) + self.__tvm_object__ = obj + + +class ObjectBase(object): + """Base object for all object types""" + + __slots__ = ["handle"] + + def __del__(self): + if _LIB is not None: + try: + handle = self.handle + except AttributeError: + return + + check_call(_LIB.TVMObjectFree(handle)) + + def __init_handle_by_constructor__(self, fconstructor, *args): + """Initialize the handle by calling constructor function. + + Parameters + ---------- + fconstructor : Function + Constructor function. + + args: list of objects + The arguments to the constructor + + Note + ---- + We have a special calling convention to call constructor functions. + So the return handle is directly set into the Node object + instead of creating a new Node. + """ + # assign handle first to avoid error raising + # pylint: disable=not-callable + self.handle = None + handle = __init_by_constructor__(fconstructor, args) + if not isinstance(handle, ObjectHandle): + handle = ObjectHandle(handle) + self.handle = handle + + def same_as(self, other): + """Check object identity. + + Parameters + ---------- + other : object + The other object to compare against. + + Returns + ------- + result : bool + The comparison result. 
+ """ + if not isinstance(other, ObjectBase): + return False + if self.handle is None: + return other.handle is None + return self.handle.value == other.handle.value diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/packed_func.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/packed_func.py new file mode 100644 index 0000000000000000000000000000000000000000..6465e0335db09486876f17a40a6b87fc6d8ce704 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/packed_func.py @@ -0,0 +1,347 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# coding: utf-8 +# pylint: disable=invalid-name, protected-access, too-many-branches +# pylint: disable=global-statement, unused-import, using-constant-test +"""Function configuration API.""" +import ctypes +import traceback +from numbers import Number, Integral + +from ..base import _LIB, get_last_ffi_error, py2cerror, check_call, raise_last_ffi_error +from ..base import c_str, string_types +from ..runtime_ctypes import DataType, TVMByteArray, Device, ObjectRValueRef +from . 
import ndarray as _nd +from .ndarray import NDArrayBase, _make_array +from .types import TVMValue, ArgTypeCode +from .types import TVMPackedCFunc, TVMCFuncFinalizer +from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _device_to_int64 +from .object import ObjectBase, PyNativeObject, _set_class_object +from . import object as _object + +PackedFuncHandle = ctypes.c_void_p +ModuleHandle = ctypes.c_void_p +ObjectHandle = ctypes.c_void_p +TVMRetValueHandle = ctypes.c_void_p + + +def _ctypes_free_resource(rhandle): + """callback to free resources when it is not needed.""" + pyobj = ctypes.cast(rhandle, ctypes.py_object) + ctypes.pythonapi.Py_DecRef(pyobj) + + +# Global callback that is always alive +TVM_FREE_PYOBJ = TVMCFuncFinalizer(_ctypes_free_resource) +ctypes.pythonapi.Py_IncRef(ctypes.py_object(TVM_FREE_PYOBJ)) + + +def _make_packed_func(handle, is_global): + """Make a packed function class""" + obj = _CLASS_PACKED_FUNC.__new__(_CLASS_PACKED_FUNC) + obj.is_global = is_global + obj.handle = handle + return obj + + +def convert_to_tvm_func(pyfunc): + """Convert a python function to TVM function + + Parameters + ---------- + pyfunc : python function + The python function to be converted. + + Returns + ------- + tvmfunc: tvm.nd.Function + The converted tvm function. 
+ """ + local_pyfunc = pyfunc + + def cfun(args, type_codes, num_args, ret, _): + """ctypes function""" + num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args + pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args)) + # pylint: disable=broad-except + try: + rv = local_pyfunc(*pyargs) + except Exception as err: + msg = traceback.format_exc() + msg = py2cerror(msg) + _LIB.TVMAPISetLastPythonError(ctypes.py_object(err)) + + return -1 + + if rv is not None: + if isinstance(rv, tuple): + raise ValueError("PackedFunction can only support one return value") + temp_args = [] + values, tcodes, _ = _make_tvm_args((rv,), temp_args) + if not isinstance(ret, TVMRetValueHandle): + ret = TVMRetValueHandle(ret) + if _LIB.TVMCFuncSetReturn(ret, values, tcodes, ctypes.c_int(1)) != 0: + raise_last_ffi_error() + _ = temp_args + _ = rv + return 0 + + handle = PackedFuncHandle() + f = TVMPackedCFunc(cfun) + # NOTE: We will need to use python-api to increase ref count of the f + # TVM_FREE_PYOBJ will be called after it is no longer needed. 
+ pyobj = ctypes.py_object(f) + ctypes.pythonapi.Py_IncRef(pyobj) + if _LIB.TVMFuncCreateFromCFunc(f, pyobj, TVM_FREE_PYOBJ, ctypes.byref(handle)) != 0: + raise_last_ffi_error() + return _make_packed_func(handle, False) + + +def _make_tvm_args(args, temp_args): + """Pack arguments into c args tvm call accept""" + num_args = len(args) + values = (TVMValue * num_args)() + type_codes = (ctypes.c_int * num_args)() + for i, arg in enumerate(args): + if isinstance(arg, ObjectBase): + values[i].v_handle = arg.handle + type_codes[i] = ArgTypeCode.OBJECT_HANDLE + elif arg is None: + values[i].v_handle = None + type_codes[i] = ArgTypeCode.NULL + elif isinstance(arg, NDArrayBase): + values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p) + type_codes[i] = ( + ArgTypeCode.NDARRAY_HANDLE if not arg.is_view else ArgTypeCode.DLTENSOR_HANDLE + ) + elif isinstance(arg, PyNativeObject): + values[i].v_handle = arg.__tvm_object__.handle + type_codes[i] = ArgTypeCode.OBJECT_HANDLE + elif isinstance(arg, _nd._TVM_COMPATS): + values[i].v_handle = ctypes.c_void_p(arg._tvm_handle) + type_codes[i] = arg.__class__._tvm_tcode + elif isinstance(arg, Integral): + values[i].v_int64 = arg + type_codes[i] = ArgTypeCode.INT + elif isinstance(arg, Number): + values[i].v_float64 = arg + type_codes[i] = ArgTypeCode.FLOAT + elif isinstance(arg, DataType): + values[i].v_str = c_str(str(arg)) + type_codes[i] = ArgTypeCode.STR + elif isinstance(arg, Device): + values[i].v_int64 = _device_to_int64(arg) + type_codes[i] = ArgTypeCode.DLDEVICE + elif isinstance(arg, (bytearray, bytes)): + # from_buffer only taeks in bytearray. 
+ if isinstance(arg, bytes): + byte_arr = bytearray(arg) + temp_args.append(byte_arr) + arg = byte_arr + + arr = TVMByteArray() + arr.data = ctypes.cast( + (ctypes.c_byte * len(arg)).from_buffer(arg), ctypes.POINTER(ctypes.c_byte) + ) + arr.size = len(arg) + values[i].v_handle = ctypes.c_void_p(ctypes.addressof(arr)) + temp_args.append(arr) + type_codes[i] = ArgTypeCode.BYTES + elif isinstance(arg, string_types): + values[i].v_str = c_str(arg) + type_codes[i] = ArgTypeCode.STR + elif isinstance(arg, (list, tuple, dict, _CLASS_OBJECT_GENERIC)): + arg = _FUNC_CONVERT_TO_OBJECT(arg) + values[i].v_handle = arg.handle + type_codes[i] = ArgTypeCode.OBJECT_HANDLE + temp_args.append(arg) + elif isinstance(arg, _CLASS_MODULE): + values[i].v_handle = arg.handle + type_codes[i] = ArgTypeCode.MODULE_HANDLE + elif isinstance(arg, PackedFuncBase): + values[i].v_handle = arg.handle + type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE + elif isinstance(arg, ctypes.c_void_p): + values[i].v_handle = arg + type_codes[i] = ArgTypeCode.HANDLE + elif isinstance(arg, ObjectRValueRef): + values[i].v_handle = ctypes.cast(ctypes.byref(arg.obj.handle), ctypes.c_void_p) + type_codes[i] = ArgTypeCode.OBJECT_RVALUE_REF_ARG + elif callable(arg): + arg = convert_to_tvm_func(arg) + values[i].v_handle = arg.handle + type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE + temp_args.append(arg) + else: + raise TypeError("Don't know how to handle type %s" % type(arg)) + return values, type_codes, num_args + + +class PackedFuncBase(object): + """Function base.""" + + __slots__ = ["handle", "is_global"] + # pylint: disable=no-member + def __init__(self, handle, is_global): + """Initialize the function with handle + + Parameters + ---------- + handle : PackedFuncHandle + the handle to the underlying function. 
+ + is_global : bool + Whether this is a global function in python + """ + self.handle = handle + self.is_global = is_global + + def __del__(self): + if not self.is_global and _LIB is not None: + if _LIB.TVMFuncFree(self.handle) != 0: + raise_last_ffi_error() + + def __call__(self, *args): + """Call the function with positional arguments + + args : list + The positional arguments to the function call. + """ + temp_args = [] + values, tcodes, num_args = _make_tvm_args(args, temp_args) + ret_val = TVMValue() + ret_tcode = ctypes.c_int() + if ( + _LIB.TVMFuncCall( + self.handle, + values, + tcodes, + ctypes.c_int(num_args), + ctypes.byref(ret_val), + ctypes.byref(ret_tcode), + ) + != 0 + ): + raise_last_ffi_error() + _ = temp_args + _ = args + return RETURN_SWITCH[ret_tcode.value](ret_val) + + +def __init_handle_by_constructor__(fconstructor, args): + """Initialize handle by constructor""" + temp_args = [] + values, tcodes, num_args = _make_tvm_args(args, temp_args) + ret_val = TVMValue() + ret_tcode = ctypes.c_int() + if ( + _LIB.TVMFuncCall( + fconstructor.handle, + values, + tcodes, + ctypes.c_int(num_args), + ctypes.byref(ret_val), + ctypes.byref(ret_tcode), + ) + != 0 + ): + raise_last_ffi_error() + _ = temp_args + _ = args + assert ret_tcode.value == ArgTypeCode.OBJECT_HANDLE + handle = ret_val.v_handle + return handle + + +def _return_module(x): + """Return function""" + handle = x.v_handle + if not isinstance(handle, ModuleHandle): + handle = ModuleHandle(handle) + return _CLASS_MODULE(handle) + + +def _handle_return_func(x): + """Return function""" + handle = x.v_handle + if not isinstance(handle, PackedFuncHandle): + handle = PackedFuncHandle(handle) + return _CLASS_PACKED_FUNC(handle, False) + + +def _get_global_func(name, allow_missing=False): + handle = PackedFuncHandle() + check_call(_LIB.TVMFuncGetGlobal(c_str(name), ctypes.byref(handle))) + + if handle.value: + return _make_packed_func(handle, False) + + if allow_missing: + return None + + raise 
ValueError("Cannot find global function %s" % name) + + +# setup return handle for function type +_object.__init_by_constructor__ = __init_handle_by_constructor__ +RETURN_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _handle_return_func +RETURN_SWITCH[ArgTypeCode.MODULE_HANDLE] = _return_module +RETURN_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = lambda x: _make_array(x.v_handle, False, True) +C_TO_PY_ARG_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _wrap_arg_func( + _handle_return_func, ArgTypeCode.PACKED_FUNC_HANDLE +) +C_TO_PY_ARG_SWITCH[ArgTypeCode.MODULE_HANDLE] = _wrap_arg_func( + _return_module, ArgTypeCode.MODULE_HANDLE +) +C_TO_PY_ARG_SWITCH[ArgTypeCode.DLTENSOR_HANDLE] = lambda x: _make_array(x.v_handle, True, False) +C_TO_PY_ARG_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = _wrap_arg_func( + lambda x: _make_array(x.v_handle, False, True), ArgTypeCode.NDARRAY_HANDLE +) + +_CLASS_MODULE = None +_CLASS_PACKED_FUNC = None +_CLASS_OBJECT_GENERIC = None +_FUNC_CONVERT_TO_OBJECT = None + + +def _set_class_module(module_class): + """Initialize the module.""" + global _CLASS_MODULE + _CLASS_MODULE = module_class + + +def _set_class_packed_func(packed_func_class): + global _CLASS_PACKED_FUNC + _CLASS_PACKED_FUNC = packed_func_class + + +def _set_class_object_generic(object_generic_class, func_convert_to_object): + global _CLASS_OBJECT_GENERIC + global _FUNC_CONVERT_TO_OBJECT + _CLASS_OBJECT_GENERIC = object_generic_class + _FUNC_CONVERT_TO_OBJECT = func_convert_to_object + + +def _init_pythonapi_inc_def_ref(): + register_func = _LIB.TVMBackendRegisterEnvCAPI + register_func(c_str("Py_IncRef"), ctypes.pythonapi.Py_IncRef) + register_func(c_str("Py_DecRef"), ctypes.pythonapi.Py_DecRef) + register_func(c_str("PyGILState_Ensure"), ctypes.pythonapi.PyGILState_Ensure) + register_func(c_str("PyGILState_Release"), ctypes.pythonapi.PyGILState_Release) + + +_init_pythonapi_inc_def_ref() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/types.py 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/types.py new file mode 100644 index 0000000000000000000000000000000000000000..38d3cd72b55dc443415b61b1368ac705d2924be8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_ctypes/types.py @@ -0,0 +1,113 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""The C Types used in API.""" +# pylint: disable=invalid-name +import ctypes +import struct +from ..base import py_str, check_call, _LIB +from ..runtime_ctypes import TVMByteArray, ArgTypeCode, Device + + +class TVMValue(ctypes.Union): + """TVMValue in C API""" + + _fields_ = [ + ("v_int64", ctypes.c_int64), + ("v_float64", ctypes.c_double), + ("v_handle", ctypes.c_void_p), + ("v_str", ctypes.c_char_p), + ] + + +TVMPackedCFunc = ctypes.CFUNCTYPE( + ctypes.c_int, + ctypes.POINTER(TVMValue), + ctypes.POINTER(ctypes.c_int), + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_void_p, +) + + +TVMCFuncFinalizer = ctypes.CFUNCTYPE(None, ctypes.c_void_p) + + +def _return_handle(x): + """return handle""" + handle = x.v_handle + if not isinstance(handle, ctypes.c_void_p): + handle = ctypes.c_void_p(handle) + return handle + + +def _return_bytes(x): + """return bytes""" + handle = x.v_handle + if not isinstance(handle, ctypes.c_void_p): + handle = ctypes.c_void_p(handle) + arr = ctypes.cast(handle, ctypes.POINTER(TVMByteArray))[0] + size = arr.size + res = bytearray(size) + rptr = (ctypes.c_byte * size).from_buffer(res) + if not ctypes.memmove(rptr, arr.data, size): + raise RuntimeError("memmove failed") + return res + + +def _return_device(value): + """return Device""" + # use bit unpacking from int64 view + # We use this to get around ctypes issue on Union of Structure + data = struct.pack("=q", value.v_int64) + arr = struct.unpack("=ii", data) + return Device(arr[0], arr[1]) + + +def _wrap_arg_func(return_f, type_code): + def _wrap_func(x): + tcode = ctypes.c_int(type_code) + check_call(_LIB.TVMCbArgToReturn(ctypes.byref(x), ctypes.byref(tcode))) + return return_f(x) + + return _wrap_func + + +def _device_to_int64(dev): + """Pack context into int64 in native endian""" + data = struct.pack("=ii", dev.device_type, dev.device_id) + return struct.unpack("=q", data)[0] + + +RETURN_SWITCH = { + ArgTypeCode.INT: lambda x: x.v_int64, + ArgTypeCode.FLOAT: lambda x: x.v_float64, + 
ArgTypeCode.HANDLE: _return_handle, + ArgTypeCode.NULL: lambda x: None, + ArgTypeCode.STR: lambda x: py_str(x.v_str), + ArgTypeCode.BYTES: _return_bytes, + ArgTypeCode.DLDEVICE: _return_device, +} + +C_TO_PY_ARG_SWITCH = { + ArgTypeCode.INT: lambda x: x.v_int64, + ArgTypeCode.FLOAT: lambda x: x.v_float64, + ArgTypeCode.HANDLE: _return_handle, + ArgTypeCode.NULL: lambda x: None, + ArgTypeCode.STR: lambda x: py_str(x.v_str), + ArgTypeCode.BYTES: _return_bytes, + ArgTypeCode.DLDEVICE: _return_device, +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy2/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d0a864416b35cd5dc1434b03b2158506c4a2ccd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy2/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""cython2 namespace""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6d31aa321ba249ea8952a563a9a7c5115f1b30a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..159f3254db1d9e286d1f8628c28066d54673481a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""cython3 namespace""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a09bf6b3037ab4af680b77a3bbbccb69f965f97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..287c82c8ff4c26c31b8665995e3e93e8edd7af24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cy3/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/base.pxi b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/base.pxi new file mode 100644 index 0000000000000000000000000000000000000000..69e1355f7d1305dfca9d80b898f1ac6d970cfff9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/base.pxi @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from ..base import raise_last_ffi_error +from libcpp.vector cimport vector +from cpython.version cimport PY_MAJOR_VERSION +from cpython cimport pycapsule +from libc.stdint cimport int32_t, int64_t, uint64_t, uint32_t, uint8_t, uint16_t +import ctypes + +cdef enum TVMArgTypeCode: + kInt = 0 + kUInt = 1 + kFloat = 2 + kTVMOpaqueHandle = 3 + kTVMNullptr = 4 + kTVMDataType = 5 + kDLDevice = 6 + kTVMDLTensorHandle = 7 + kTVMObjectHandle = 8 + kTVMModuleHandle = 9 + kTVMPackedFuncHandle = 10 + kTVMStr = 11 + kTVMBytes = 12 + kTVMNDArrayHandle = 13 + kTVMObjectRefArg = 14 + kTVMExtBegin = 15 + +cdef extern from "tvm/runtime/c_runtime_api.h": + ctypedef struct DLDataType: + uint8_t code + uint8_t bits + uint16_t lanes + + ctypedef struct DLDevice: + int device_type + int device_id + + ctypedef struct DLTensor: + void* data + DLDevice device + int ndim + DLDataType dtype + int64_t* shape + int64_t* strides + uint64_t byte_offset + + ctypedef struct DLManagedTensor: + DLTensor dl_tensor + void* manager_ctx + void (*deleter)(DLManagedTensor* self) + + ctypedef struct TVMValue: + int64_t v_int64 + double v_float64 + void* v_handle + const char* v_str + DLDataType v_type + DLDevice v_device + +ctypedef int64_t tvm_index_t +ctypedef DLTensor* DLTensorHandle +ctypedef void* TVMStreamHandle +ctypedef void* TVMRetValueHandle +ctypedef void* TVMPackedFuncHandle +ctypedef void* ObjectHandle + +ctypedef struct TVMObject: + uint32_t type_index_ + int32_t ref_counter_ + void (*deleter_)(TVMObject* self) + + +ctypedef int (*TVMPackedCFunc)( + TVMValue* args, + int* type_codes, 
+ int num_args, + TVMRetValueHandle ret, + void* resource_handle) + +ctypedef void (*TVMPackedCFuncFinalizer)(void* resource_handle) + +# NOTE: All of TVM's C API function can be called without gil. +# for API functions that can be run long(e.g. FuncCall) +# we need to explicitly release the GIL as follows. +# +# cdef myfunc(): +# cdef int c_api_ret_code +# with nogil: +# c_api_ret_code = TVMAPIFunc(...) +# CHECK_CALL(c_apt_ret_code) +# +# Explicitly releasing the GIL enables other python threads +# to continue running while we are in TVMAPIFunc. +# Not releasing GIL explicitly is OK(and perhaps desirable) +# for short-running functions, as frequent unlocking also takes time, +# the python interpreter will release GIL in a set period. +# +# We mark the possibly long running function as nogil below. +cdef extern from "tvm/runtime/c_runtime_api.h": + void TVMAPISetLastError(const char* msg) + void TVMAPISetLastPythonError(void* py_object) except + + const char *TVMGetLastError() + int TVMFuncGetGlobal(const char* name, + TVMPackedFuncHandle* out) + int TVMFuncCall(TVMPackedFuncHandle func, + TVMValue* arg_values, + int* type_codes, + int num_args, + TVMValue* ret_val, + int* ret_type_code) nogil + int TVMFuncFree(TVMPackedFuncHandle func) + int TVMCFuncSetReturn(TVMRetValueHandle ret, + TVMValue* value, + int* type_code, + int num_ret) + int TVMFuncCreateFromCFunc(TVMPackedCFunc func, + void* resource_handle, + TVMPackedCFuncFinalizer fin, + TVMPackedFuncHandle *out) + int TVMCbArgToReturn(TVMValue* value, int* code) + int TVMArrayAlloc(tvm_index_t* shape, + tvm_index_t ndim, + DLDataType dtype, + DLDevice dev, + DLTensorHandle* out) nogil + int TVMArrayFree(DLTensorHandle handle) nogil + int TVMArrayCopyFromTo(DLTensorHandle src, + DLTensorHandle to, + TVMStreamHandle stream) nogil + int TVMArrayFromDLPack(DLManagedTensor* arr_from, + DLTensorHandle* out) nogil + int TVMArrayToDLPack(DLTensorHandle arr_from, + DLManagedTensor** out) nogil + void 
TVMDLManagedTensorCallDeleter(DLManagedTensor* dltensor) + int TVMObjectFree(ObjectHandle obj) + int TVMObjectGetTypeIndex(ObjectHandle obj, unsigned* out_index) + + +cdef inline py_str(const char* x): + if PY_MAJOR_VERSION < 3: + return x + else: + return x.decode("utf-8") + + +cdef inline c_str(pystr): + """Create ctypes char * from a python string + Parameters + ---------- + string : string type + python string + + Returns + ------- + str : c_char_p + A char pointer that can be passed to C API + """ + return pystr.encode("utf-8") + + +cdef inline int CHECK_CALL(int ret) except -2: + """Check the return code of the C API function call""" + # -2 brings exception + if ret == -2: + return -2 + if ret != 0: + raise_last_ffi_error() + return 0 + + +cdef inline object ctypes_handle(void* chandle): + """Cast C handle to ctypes handle.""" + return ctypes.cast(chandle, ctypes.c_void_p) + + +cdef inline void* c_handle(object handle): + """Cast C types handle to c handle.""" + cdef unsigned long long v_ptr + v_ptr = handle.value + return (v_ptr) + + +# python env API +cdef extern from "Python.h": + int PyErr_CheckSignals() + +cdef extern from "tvm/runtime/c_backend_api.h": + int TVMBackendRegisterEnvCAPI(const char* name, void* ptr) + +cdef _init_env_api(): + # Initialize env api for signal handling + # so backend can call tvm::runtime::EnvCheckSignals to check + # signal when executing a long running function. + # + # This feature is only enabled in cython for now due to problems of calling + # these functions in ctypes. + # + # When the functions are not registered, the signals will be handled + # only when the FFI function returns. 
+ CHECK_CALL(TVMBackendRegisterEnvCAPI(c_str("PyErr_CheckSignals"), PyErr_CheckSignals)) + +_init_env_api() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/core.pyx b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/core.pyx new file mode 100644 index 0000000000000000000000000000000000000000..730f8fc1334520543d2933b0c4152230f311f7c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/core.pyx @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +include "./base.pxi" +include "./object.pxi" +include "./packed_func.pxi" +include "./ndarray.pxi" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/ndarray.pxi b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/ndarray.pxi new file mode 100644 index 0000000000000000000000000000000000000000..b88698319f7aa7fad92c9a29a060d51127d02cf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/ndarray.pxi @@ -0,0 +1,180 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from ..runtime_ctypes import TVMArrayHandle +from cpython cimport PyCapsule_Destructor + +cdef const char* _c_str_dltensor = "dltensor" +cdef const char* _c_str_used_dltensor = "used_dltensor" + + +cdef void _c_dlpack_deleter(object pycaps): + cdef DLManagedTensor* dltensor + if pycapsule.PyCapsule_IsValid(pycaps, _c_str_dltensor): + dltensor = pycapsule.PyCapsule_GetPointer(pycaps, _c_str_dltensor) + TVMDLManagedTensorCallDeleter(dltensor) + + +def _from_dlpack(object dltensor): + cdef DLManagedTensor* ptr + cdef DLTensorHandle chandle + cdef int c_api_ret_code + if pycapsule.PyCapsule_IsValid(dltensor, _c_str_dltensor): + ptr = pycapsule.PyCapsule_GetPointer(dltensor, _c_str_dltensor) + with nogil: + c_api_ret_code = TVMArrayFromDLPack(ptr, &chandle) + CHECK_CALL(c_api_ret_code) + # set name and destructor to be empty + pycapsule.PyCapsule_SetDestructor(dltensor, NULL) + pycapsule.PyCapsule_SetName(dltensor, _c_str_used_dltensor) + return c_make_array(chandle, False, False) + raise ValueError("Expect a dltensor field, pycapsule.PyCapsule can only be consumed once") + + +cdef class NDArrayBase: + cdef DLTensor* chandle + cdef int c_is_view + + cdef inline _set_handle(self, handle): + cdef unsigned long long ptr + if handle is None: + self.chandle = NULL + else: + ptr = 
ctypes.cast(handle, ctypes.c_void_p).value + self.chandle = (ptr) + + property _tvm_handle: + def __get__(self): + return self.chandle + + property handle: + def __get__(self): + if self.chandle == NULL: + return None + else: + return ctypes.cast( + self.chandle, TVMArrayHandle) + + def __set__(self, value): + self._set_handle(value) + + property is_view: + def __get__(self): + return self.c_is_view != 0 + + @property + def shape(self): + """Shape of this array""" + return tuple(self.chandle.shape[i] for i in range(self.chandle.ndim)) + + def __init__(self, handle, is_view): + self._set_handle(handle) + self.c_is_view = is_view + + def __dealloc__(self): + cdef int c_api_ret_code + if self.c_is_view == 0: + with nogil: + c_api_ret_code = TVMArrayFree(self.chandle) + CHECK_CALL(c_api_ret_code) + + def _copyto(self, target_nd): + """Internal function that implements copy to target ndarray.""" + cdef int c_api_ret_code + with nogil: + c_api_ret_code = TVMArrayCopyFromTo(self.chandle, (target_nd).chandle, NULL) + CHECK_CALL(c_api_ret_code) + return target_nd + + def to_dlpack(self): + """Produce an array from a DLPack Tensor without copying memory + + Returns + ------- + dlpack : DLPack tensor view of the array data + """ + cdef DLManagedTensor* dltensor + cdef int c_api_ret_code + if self.c_is_view != 0: + raise ValueError("to_dlpack do not work with memory views") + with nogil: + c_api_ret_code = TVMArrayToDLPack(self.chandle, &dltensor) + CHECK_CALL(c_api_ret_code) + return pycapsule.PyCapsule_New(dltensor, _c_str_dltensor, _c_dlpack_deleter) + + +# Import limited object-related function from C++ side to improve the speed +# NOTE: can only use POD-C compatible object in FFI. 
+cdef extern from "tvm/runtime/ndarray.h" namespace "tvm::runtime": + cdef void* TVMArrayHandleToObjectHandle(DLTensorHandle handle) + + +cdef c_make_array(void* chandle, is_view, is_container): + global _TVM_ND_CLS + + if is_container: + tindex = ( + TVMArrayHandleToObjectHandle(chandle)).type_index_ + if tindex < len(_TVM_ND_CLS): + cls = _TVM_ND_CLS[tindex] + if cls is not None: + ret = cls.__new__(cls) + else: + ret = _CLASS_NDARRAY.__new__(_CLASS_NDARRAY) + else: + ret = _CLASS_NDARRAY.__new__(_CLASS_NDARRAY) + (ret).chandle = chandle + (ret).c_is_view = is_view + return ret + else: + ret = _CLASS_NDARRAY.__new__(_CLASS_NDARRAY) + (ret).chandle = chandle + (ret).c_is_view = is_view + return ret + + +cdef _TVM_COMPATS = () + +cdef _TVM_EXT_RET = {} + +def _reg_extension(cls, fcreate): + global _TVM_COMPATS + _TVM_COMPATS += (cls,) + if fcreate: + _TVM_EXT_RET[cls._tvm_tcode] = fcreate + +cdef list _TVM_ND_CLS = [] + +cdef _register_ndarray(int index, object cls): + """register object class""" + global _TVM_ND_CLS + while len(_TVM_ND_CLS) <= index: + _TVM_ND_CLS.append(None) + + _TVM_ND_CLS[index] = cls + + +def _make_array(handle, is_view, is_container): + cdef unsigned long long ptr + ptr = ctypes.cast(handle, ctypes.c_void_p).value + return c_make_array(ptr, is_view, is_container) + +cdef object _CLASS_NDARRAY = None + +def _set_class_ndarray(cls): + global _CLASS_NDARRAY + _CLASS_NDARRAY = cls diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/object.pxi b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/object.pxi new file mode 100644 index 0000000000000000000000000000000000000000..94a9310d7815e5dd3ea7d6910740013e1e958333 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/object.pxi @@ -0,0 +1,152 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Maps object type index to its constructor""" +cdef list OBJECT_TYPE = [] +"""Maps object type to its type index""" +cdef dict OBJECT_INDEX = {} + +def _register_object(int index, object cls): + """register object class""" + if issubclass(cls, NDArrayBase): + _register_ndarray(index, cls) + return + + global OBJECT_TYPE + while len(OBJECT_TYPE) <= index: + OBJECT_TYPE.append(None) + OBJECT_TYPE[index] = cls + OBJECT_INDEX[cls] = index + +def _get_object_type_index(object cls): + """get the type index of object class""" + return OBJECT_INDEX.get(cls) + +cdef inline object make_ret_object(void* chandle): + global OBJECT_TYPE + global _CLASS_OBJECT + cdef unsigned tindex + cdef object cls + cdef object handle + object_type = OBJECT_TYPE + handle = ctypes_handle(chandle) + CHECK_CALL(TVMObjectGetTypeIndex(chandle, &tindex)) + + if tindex < len(OBJECT_TYPE): + cls = OBJECT_TYPE[tindex] + if cls is not None: + if issubclass(cls, PyNativeObject): + obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT) + (obj).chandle = chandle + return cls.__from_tvm_object__(cls, obj) + obj = cls.__new__(cls) + else: + obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT) + else: + obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT) + + (obj).chandle = chandle + return obj + + +class PyNativeObject: + """Base class of all TVM objects 
that also subclass python's builtin types.""" + __slots__ = [] + + def __init_tvm_object_by_constructor__(self, fconstructor, *args): + """Initialize the internal tvm_object by calling constructor function. + + Parameters + ---------- + fconstructor : Function + Constructor function. + + args: list of objects + The arguments to the constructor + + Note + ---- + We have a special calling convention to call constructor functions. + So the return object is directly set into the object + """ + obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT) + obj.__init_handle_by_constructor__(fconstructor, *args) + self.__tvm_object__ = obj + + +cdef class ObjectBase: + cdef void* chandle + + cdef inline _set_handle(self, handle): + cdef unsigned long long ptr + if handle is None: + self.chandle = NULL + else: + ptr = handle.value + self.chandle = (ptr) + + property handle: + def __get__(self): + return ctypes_handle(self.chandle) + + def __set__(self, value): + self._set_handle(value) + + def __dealloc__(self): + CHECK_CALL(TVMObjectFree(self.chandle)) + + def __init_handle_by_constructor__(self, fconstructor, *args): + """Initialize the handle by calling constructor function. + + Parameters + ---------- + fconstructor : Function + Constructor function. + + args: list of objects + The arguments to the constructor + + Note + ---- + We have a special calling convention to call constructor functions. + So the return handle is directly set into the Node object + instead of creating a new Node. + """ + # avoid error raised during construction. + self.chandle = NULL + cdef void* chandle + ConstructorCall( + (fconstructor).chandle, + kTVMObjectHandle, args, &chandle) + self.chandle = chandle + + def same_as(self, other): + """Check object identity. + + Parameters + ---------- + other : object + The other object to compare against. + + Returns + ------- + result : bool + The comparison result. 
+ """ + if not isinstance(other, ObjectBase): + return False + return self.chandle == (other).chandle diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/packed_func.pxi b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/packed_func.pxi new file mode 100644 index 0000000000000000000000000000000000000000..3d1e87bf563d3fa4f9f21cd1b66c58b2c59829c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_cython/packed_func.pxi @@ -0,0 +1,387 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import ctypes +import traceback +from cpython cimport Py_INCREF, Py_DECREF, PyGILState_Ensure, PyGILState_Release +from numbers import Number, Integral +from ..base import string_types, py2cerror +from ..runtime_ctypes import DataType, Device, TVMByteArray, ObjectRValueRef + + +cdef void tvm_callback_finalize(void* fhandle) with gil: + local_pyfunc = (fhandle) + Py_DECREF(local_pyfunc) + +cdef int tvm_callback(TVMValue* args, + int* type_codes, + int num_args, + TVMRetValueHandle ret, + void* fhandle) with gil: + cdef list pyargs + cdef TVMValue value + cdef int tcode + local_pyfunc = (fhandle) + pyargs = [] + for i in range(num_args): + value = args[i] + tcode = type_codes[i] + if (tcode == kTVMObjectHandle or + tcode == kTVMPackedFuncHandle or + tcode == kTVMModuleHandle or + tcode == kTVMNDArrayHandle or + tcode == kTVMObjectRefArg or + tcode > kTVMExtBegin): + CHECK_CALL(TVMCbArgToReturn(&value, &tcode)) + + if tcode != kTVMDLTensorHandle: + pyargs.append(make_ret(value, tcode)) + else: + pyargs.append(c_make_array(value.v_handle, True, False)) + try: + rv = local_pyfunc(*pyargs) + except Exception as err: + msg = traceback.format_exc() + msg = py2cerror(msg) + TVMAPISetLastPythonError(err) + + return -1 + if rv is not None: + if isinstance(rv, tuple): + raise ValueError("PackedFunction can only support one return value") + temp_args = [] + make_arg(rv, &value, &tcode, temp_args) + CHECK_CALL(TVMCFuncSetReturn(ret, &value, &tcode, 1)) + return 0 + + +cdef object make_packed_func(TVMPackedFuncHandle chandle, int is_global): + obj = _CLASS_PACKED_FUNC.__new__(_CLASS_PACKED_FUNC) + (obj).chandle = chandle + (obj).is_global = is_global + return obj + + +def convert_to_tvm_func(object pyfunc): + """Convert a python function to TVM function + + Parameters + ---------- + pyfunc : python function + The python function to be converted. + + Returns + ------- + tvmfunc: tvm.Function + The converted tvm function. 
+ """ + cdef TVMPackedFuncHandle chandle + Py_INCREF(pyfunc) + CHECK_CALL(TVMFuncCreateFromCFunc(tvm_callback, + (pyfunc), + tvm_callback_finalize, + &chandle)) + return make_packed_func(chandle, False) + + +cdef inline int make_arg(object arg, + TVMValue* value, + int* tcode, + list temp_args) except -1: + """Pack arguments into c args tvm call accept""" + cdef unsigned long long ptr + if isinstance(arg, ObjectBase): + value[0].v_handle = (arg).chandle + tcode[0] = kTVMObjectHandle + elif isinstance(arg, NDArrayBase): + value[0].v_handle = (arg).chandle + tcode[0] = (kTVMNDArrayHandle if + not (arg).c_is_view else kTVMDLTensorHandle) + elif isinstance(arg, PyNativeObject): + value[0].v_handle = ((arg.__tvm_object__)).chandle + tcode[0] = kTVMObjectHandle + elif isinstance(arg, _TVM_COMPATS): + ptr = arg._tvm_handle + value[0].v_handle = (ptr) + tcode[0] = arg.__class__._tvm_tcode + elif isinstance(arg, Integral): + value[0].v_int64 = arg + tcode[0] = kInt + elif isinstance(arg, float): + value[0].v_float64 = arg + tcode[0] = kFloat + elif isinstance(arg, str): + tstr = c_str(arg) + value[0].v_str = tstr + tcode[0] = kTVMStr + temp_args.append(tstr) + elif arg is None: + value[0].v_handle = NULL + tcode[0] = kTVMNullptr + elif isinstance(arg, Number): + value[0].v_float64 = arg + tcode[0] = kFloat + elif isinstance(arg, DataType): + tstr = c_str(str(arg)) + value[0].v_str = tstr + tcode[0] = kTVMStr + temp_args.append(tstr) + elif isinstance(arg, Device): + value[0].v_device = (( + ctypes.addressof(arg)))[0] + tcode[0] = kDLDevice + elif isinstance(arg, (bytes, bytearray)): + # from_buffer only taeks in bytearray. 
+ if isinstance(arg, bytes): + byte_arr = bytearray(arg) + temp_args.append(byte_arr) + arg = byte_arr + + arr = TVMByteArray() + arr.data = ctypes.cast( + (ctypes.c_byte * len(arg)).from_buffer(arg), + ctypes.POINTER(ctypes.c_byte)) + arr.size = len(arg) + value[0].v_handle = ( + ctypes.addressof(arr)) + tcode[0] = kTVMBytes + temp_args.append(arr) + elif isinstance(arg, string_types): + tstr = c_str(arg) + value[0].v_str = tstr + tcode[0] = kTVMStr + temp_args.append(tstr) + elif isinstance(arg, (list, tuple, dict, _CLASS_OBJECT_GENERIC)): + arg = _FUNC_CONVERT_TO_OBJECT(arg) + value[0].v_handle = (arg).chandle + tcode[0] = kTVMObjectHandle + temp_args.append(arg) + elif isinstance(arg, _CLASS_MODULE): + value[0].v_handle = c_handle(arg.handle) + tcode[0] = kTVMModuleHandle + elif isinstance(arg, PackedFuncBase): + value[0].v_handle = (arg).chandle + tcode[0] = kTVMPackedFuncHandle + elif isinstance(arg, ctypes.c_void_p): + value[0].v_handle = c_handle(arg) + tcode[0] = kTVMOpaqueHandle + elif isinstance(arg, ObjectRValueRef): + value[0].v_handle = &(((arg.obj)).chandle) + tcode[0] = kTVMObjectRefArg + elif callable(arg): + arg = convert_to_tvm_func(arg) + value[0].v_handle = (arg).chandle + tcode[0] = kTVMPackedFuncHandle + temp_args.append(arg) + else: + raise TypeError("Don't know how to handle type %s" % type(arg)) + return 0 + + +cdef inline bytearray make_ret_bytes(void* chandle): + handle = ctypes_handle(chandle) + arr = ctypes.cast(handle, ctypes.POINTER(TVMByteArray))[0] + size = arr.size + res = bytearray(size) + rptr = (ctypes.c_byte * size).from_buffer(res) + if not ctypes.memmove(rptr, arr.data, size): + raise RuntimeError('memmove failed') + return res + + +cdef inline object make_ret(TVMValue value, int tcode): + """convert result to return value.""" + if tcode == kTVMObjectHandle: + return make_ret_object(value.v_handle) + elif tcode == kTVMNullptr: + return None + elif tcode == kInt: + return value.v_int64 + elif tcode == kFloat: + return 
value.v_float64 + elif tcode == kTVMNDArrayHandle: + return c_make_array(value.v_handle, False, True) + elif tcode == kTVMStr: + return py_str(value.v_str) + elif tcode == kTVMBytes: + return make_ret_bytes(value.v_handle) + elif tcode == kTVMOpaqueHandle: + return ctypes_handle(value.v_handle) + elif tcode == kDLDevice: + return Device(value.v_device.device_type, value.v_device.device_id) + elif tcode == kTVMModuleHandle: + return _CLASS_MODULE(ctypes_handle(value.v_handle)) + elif tcode == kTVMPackedFuncHandle: + return make_packed_func(value.v_handle, False) + elif tcode in _TVM_EXT_RET: + return _TVM_EXT_RET[tcode](ctypes_handle(value.v_handle)) + + raise ValueError("Unhandled type code %d" % tcode) + + +cdef inline int FuncCall3(void* chandle, + tuple args, + int nargs, + TVMValue* ret_val, + int* ret_tcode) except -1: + cdef TVMValue[3] values + cdef int[3] tcodes + nargs = len(args) + temp_args = [] + for i in range(nargs): + make_arg(args[i], &values[i], &tcodes[i], temp_args) + + with nogil: + c_api_ret_code = TVMFuncCall(chandle, &values[0], &tcodes[0], + nargs, ret_val, ret_tcode) + + CHECK_CALL(c_api_ret_code) + return 0 + +cdef inline int FuncCall(void* chandle, + tuple args, + TVMValue* ret_val, + int* ret_tcode) except -1: + cdef int nargs + cdef int c_api_ret_code + nargs = len(args) + if nargs <= 3: + FuncCall3(chandle, args, nargs, ret_val, ret_tcode) + return 0 + + cdef vector[TVMValue] values + cdef vector[int] tcodes + values.resize(max(nargs, 1)) + tcodes.resize(max(nargs, 1)) + temp_args = [] + for i in range(nargs): + make_arg(args[i], &values[i], &tcodes[i], temp_args) + + with nogil: + c_api_ret_code = TVMFuncCall(chandle, &values[0], &tcodes[0], + nargs, ret_val, ret_tcode) + CHECK_CALL(c_api_ret_code) + return 0 + + +cdef inline int ConstructorCall(void* constructor_handle, + int type_code, + tuple args, + void** handle) except -1: + """Call contructor of a handle function""" + cdef TVMValue ret_val + cdef int ret_tcode + 
FuncCall(constructor_handle, args, &ret_val, &ret_tcode) + assert ret_tcode == type_code + handle[0] = ret_val.v_handle + return 0 + + +cdef class PackedFuncBase: + cdef TVMPackedFuncHandle chandle + cdef int is_global + + cdef inline _set_handle(self, handle): + if handle is None: + self.chandle = NULL + else: + self.chandle = c_handle(handle) + + property is_global: + def __get__(self): + return self.c_is_global != 0 + + def __set__(self, value): + self.c_is_global = value + + property handle: + def __get__(self): + if self.chandle == NULL: + return None + else: + return ctypes.cast(self.chandle, ctypes.c_void_p) + def __set__(self, value): + self._set_handle(value) + + def __init__(self, handle, is_global): + self._set_handle(handle) + self.c_is_global = is_global + + def __dealloc__(self): + if self.is_global == 0: + CHECK_CALL(TVMFuncFree(self.chandle)) + + def __call__(self, *args): + cdef TVMValue ret_val + cdef int ret_tcode + ret_tcode = kTVMNullptr + FuncCall(self.chandle, args, &ret_val, &ret_tcode) + return make_ret(ret_val, ret_tcode) + + +def _get_global_func(name, allow_missing): + cdef TVMPackedFuncHandle chandle + CHECK_CALL(TVMFuncGetGlobal(c_str(name), &chandle)) + if chandle != NULL: + return make_packed_func(chandle, True) + + if allow_missing: + return None + + raise ValueError("Cannot find global function %s" % name) + + +_CLASS_PACKED_FUNC = None +_CLASS_MODULE = None +_CLASS_OBJECT = None +_CLASS_OBJECT_GENERIC = None +_FUNC_CONVERT_TO_OBJECT = None + +def _set_class_module(module_class): + """Initialize the module.""" + global _CLASS_MODULE + _CLASS_MODULE = module_class + +def _set_class_packed_func(func_class): + global _CLASS_PACKED_FUNC + _CLASS_PACKED_FUNC = func_class + +def _set_class_object(obj_class): + global _CLASS_OBJECT + _CLASS_OBJECT = obj_class + +def _set_class_object_generic(object_generic_class, func_convert_to_object): + global _CLASS_OBJECT_GENERIC + global _FUNC_CONVERT_TO_OBJECT + _CLASS_OBJECT_GENERIC = 
object_generic_class + _FUNC_CONVERT_TO_OBJECT = func_convert_to_object + +# Py_INCREF and Py_DECREF are C macros, not function objects. +# Therefore, providing a wrapper function. +cdef void _py_incref_wrapper(void* py_object): + Py_INCREF(py_object) +cdef void _py_decref_wrapper(void* py_object): + Py_DECREF(py_object) + +def _init_pythonapi_inc_def_ref(): + register_func = TVMBackendRegisterEnvCAPI + register_func(c_str("Py_IncRef"), _py_incref_wrapper) + register_func(c_str("Py_DecRef"), _py_decref_wrapper) + register_func(c_str("PyGILState_Ensure"), PyGILState_Ensure) + register_func(c_str("PyGILState_Release"), PyGILState_Release) + +_init_pythonapi_inc_def_ref() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_pyversion.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_pyversion.py new file mode 100644 index 0000000000000000000000000000000000000000..b661cfd875fce768a571d40b8cedf93164a816e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/_pyversion.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Python version check +""" +import sys + +# ---------------------------- +# Python3 version. 
+# ---------------------------- +if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6): + PY3STATEMENT = "The minimal Python requirement is Python 3.6" + raise Exception(PY3STATEMENT) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/base.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b0a63700b79b04bea297e81d713bc8685dc21f6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/base.py @@ -0,0 +1,496 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# coding: utf-8 +# pylint: disable=invalid-name, import-outside-toplevel +"""Base library for TVM FFI.""" +import ctypes +import functools +import os +import re +import sys +import types + +from typing import Callable, Sequence, Optional + +import numpy as np + +from . 
import libinfo + +# ---------------------------- +# library loading +# ---------------------------- +string_types = (str,) +integer_types = (int, np.int32) +numeric_types = integer_types + (float, np.float16, np.float32) + +# this function is needed for python3 +# to convert ctypes.char_p .value back to python str +if sys.platform == "win32": + + def _py_str(x): + try: + return x.decode("utf-8") + except UnicodeDecodeError: + encoding = "cp" + str(ctypes.cdll.kernel32.GetACP()) + return x.decode(encoding) + + py_str = _py_str +else: + py_str = lambda x: x.decode("utf-8") + + +def _load_lib(): + """Load libary by searching possible path.""" + lib_path = libinfo.find_lib_path() + # The dll search path need to be added explicitly in + # windows after python 3.8 + if sys.platform.startswith("win32") and sys.version_info >= (3, 8): + for path in libinfo.get_dll_directories(): + os.add_dll_directory(path) + lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL) + lib.TVMGetLastError.restype = ctypes.c_char_p + return lib, os.path.basename(lib_path[0]) + + +try: + # The following import is needed for TVM to work with pdb + import readline # pylint: disable=unused-import +except ImportError: + pass + +# version number +__version__ = libinfo.__version__ +# library instance +_LIB, _LIB_NAME = _load_lib() + +# Whether we are runtime only +_RUNTIME_ONLY = "runtime" in _LIB_NAME + +# The FFI mode of TVM +_FFI_MODE = os.environ.get("TVM_FFI", "auto") + + +# ---------------------------- +# helper function in ctypes. 
+# ---------------------------- +def c_str(string): + """Create ctypes char * from a python string + Parameters + ---------- + string : string type + python string + + Returns + ------- + str : c_char_p + A char pointer that can be passed to C API + """ + return ctypes.c_char_p(string.encode("utf-8")) + + +def c_array(ctype, values): + """Create ctypes array from a python array + + Parameters + ---------- + ctype : ctypes data type + data type of the array we want to convert to + + values : tuple or list + data content + + Returns + ------- + out : ctypes array + Created ctypes array + """ + return (ctype * len(values))(*values) + + +def decorate(func, fwrapped): + """A wrapper call of decorator package, differs to call time + + Parameters + ---------- + func : function + The original function + + fwrapped : function + The wrapped function + """ + import decorator + + return decorator.decorate(func, fwrapped) + + +# ----------------------------------------- +# Base code for structured error handling. +# ----------------------------------------- +# Maps error type to its constructor +ERROR_TYPE = {} + + +class TVMError(RuntimeError): + """Default error thrown by TVM functions. + + TVMError will be raised if you do not give any error type specification, + """ + + +def register_error(func_name=None, cls=None): + """Register an error class so it can be recognized by the ffi error handler. + + Parameters + ---------- + func_name : str or function or class + The name of the error function. + + cls : function + The function to create the class + + Returns + ------- + fregister : function + Register function if f is not specified. + + Examples + -------- + .. 
code-block:: python + + @tvm.error.register_error + class MyError(RuntimeError): + pass + + err_inst = tvm.error.create_ffi_error("MyError: xyz") + assert isinstance(err_inst, MyError) + """ + if callable(func_name): + cls = func_name + func_name = cls.__name__ + + def register(mycls): + """internal register function""" + err_name = func_name if isinstance(func_name, str) else mycls.__name__ + ERROR_TYPE[err_name] = mycls + return mycls + + if cls is None: + return register + return register(cls) + + +def _valid_error_name(name): + """Check whether name is a valid error name.""" + return all(x.isalnum() or x in "_." for x in name) + + +def _find_error_type(line): + """Find the error name given the first line of the error message. + + Parameters + ---------- + line : str + The first line of error message. + + Returns + ------- + name : str The error name + """ + if sys.platform == "win32": + # Stack traces aren't logged on Windows due to a DMLC limitation, + # so we should try to get the underlying error another way. + # DMLC formats errors "[timestamp] file:line: ErrorMessage" + # ErrorMessage is usually formatted "ErrorType: message" + # We can try to extract the error type using the final ":" + end_pos = line.rfind(":") + if end_pos == -1: + return None + start_pos = line.rfind(":", 0, end_pos) + if start_pos == -1: + err_name = line[:end_pos].strip() + else: + err_name = line[start_pos + 1 : end_pos].strip() + if _valid_error_name(err_name): + return err_name + return None + + end_pos = line.find(":") + if end_pos == -1: + return None + err_name = line[:end_pos] + if _valid_error_name(err_name): + return err_name + return None + + +def c2pyerror(err_msg): + """Translate C API error message to python style. + + Parameters + ---------- + err_msg : str + The error message. + + Returns + ------- + new_msg : str + Translated message. + + err_type : str + Detected error type. 
+ """ + arr = err_msg.split("\n") + if arr[-1] == "": + arr.pop() + err_type = _find_error_type(arr[0]) + trace_mode = False + stack_trace = [] + message = [] + for line in arr: + if trace_mode: + if line.startswith(" ") and len(stack_trace) > 0: + stack_trace[-1] += "\n" + line + elif line.startswith(" "): + stack_trace.append(line) + else: + trace_mode = False + if not trace_mode: + if line.startswith("Stack trace"): + trace_mode = True + else: + message.append(line) + out_msg = "" + if stack_trace: + out_msg += "Traceback (most recent call last):\n" + out_msg += "\n".join(reversed(stack_trace)) + "\n" + out_msg += "\n".join(message) + return out_msg, err_type + + +def py2cerror(err_msg): + """Translate python style error message to C style. + + Parameters + ---------- + err_msg : str + The error message. + + Returns + ------- + new_msg : str + Translated message. + """ + arr = err_msg.split("\n") + if arr[-1] == "": + arr.pop() + trace_mode = False + stack_trace = [] + message = [] + for line in arr: + if trace_mode: + if line.startswith(" "): + stack_trace.append(line) + else: + trace_mode = False + if not trace_mode: + if line.find("Traceback") != -1: + trace_mode = True + else: + message.append(line) + # Remove the first error name if there are two of them. + # RuntimeError: MyErrorName: message => MyErrorName: message + head_arr = message[0].split(":", 3) + if len(head_arr) >= 3 and _valid_error_name(head_arr[1].strip()): + head_arr[1] = head_arr[1].strip() + message[0] = ":".join(head_arr[1:]) + # reverse the stack trace. + out_msg = "\n".join(message) + if stack_trace: + out_msg += "\nStack trace:\n" + out_msg += "\n".join(reversed(stack_trace)) + "\n" + return out_msg + + +def get_last_ffi_error(): + """Create error object given result of TVMGetLastError. 
+ + Returns + ------- + err : object + The error object based on the err_msg + """ + c_err_msg = py_str(_LIB.TVMGetLastError()) + py_err_msg, err_type = c2pyerror(c_err_msg) + if err_type is not None and err_type.startswith("tvm.error."): + err_type = err_type[10:] + return ERROR_TYPE.get(err_type, TVMError)(py_err_msg) + + +def _append_traceback_frame(tb, func_name, filepath, lineno: Optional[int]): + """Append a dummy frame to appear in the Python traceback""" + + # Compile a dummy function to Python bytecode, so that with the + # filepath that we want to appear in the traceback. Any external + # debugger (e.g. pdb) that catches the exception will use the + # filepath to show code snippets from that FFI file. + header = "" if lineno is None else "\n" * (lineno - 1) + code = compile( + f"{header}def dummy_func(): raise NotImplementedError()", + filepath, + "exec", + ) + + # Replacing the name by updating the bytecode allows the function + # name to be values that would normally be forbidden by python + # syntax. For example, "operator()". + code = code.replace(co_consts=(code.co_consts[0].replace(co_name=func_name), func_name, None)) + namespace = {} + exec(code, namespace) # pylint: disable=exec-used + dummy_func = namespace["dummy_func"] + + # Execute the dummy function in order to generate a stack frame. + dummy_tb = None + try: + dummy_func() + except NotImplementedError as err: + dummy_tb = err.__traceback__ + + # Insert the dummy function into the stack trace. 
+ new_frame = dummy_tb.tb_next + return types.TracebackType(tb, new_frame.tb_frame, new_frame.tb_lasti, new_frame.tb_lineno) + + +def _filter_traceback_frames(tb, filter_funcs: Sequence[Callable[[types.CodeType], bool]]): + orig = tb + filtered_at_least_one = False + temp_all_frames = [] + filtered_frames = [] + + while tb is not None: + frame_code = tb.tb_frame.f_code + should_remove = any(filter_func(frame_code) for filter_func in filter_funcs) + if not should_remove: + filtered_at_least_one = True + filtered_frames.append(tb) + temp_all_frames.append(tb) + tb = tb.tb_next + + if not filtered_at_least_one: + return orig + + def _append_frame(tb, next_tb_frame): + return types.TracebackType( + tb, next_tb_frame.tb_frame, next_tb_frame.tb_lasti, next_tb_frame.tb_lineno + ) + + new_tb = functools.reduce(_append_frame, reversed(filtered_frames)) + + return new_tb + + +def raise_last_ffi_error(): + """Raise the previous error from FFI + + This should be used instead of `raise get_last_ffi_error()`, as it + handle propagation of errors across an FFI boundary. For example, + if Python passes a callback to a C++ function, and the callback + raises an exception, the re-thrown exception should contain the + full stack trace, not just the stack frames that are above the + outermost FFI call. + """ + + _LIB.TVMGetLastPythonError.restype = ctypes.c_void_p + _LIB.TVMGetLastBacktrace.restype = ctypes.c_char_p + py_err = _LIB.TVMGetLastPythonError() + if py_err is None: + c_err_msg = py_str(_LIB.TVMGetLastError()) + py_err_msg, err_type = c2pyerror(c_err_msg) + if err_type is not None and err_type.startswith("tvm.error."): + err_type = err_type[10:] + py_err = ERROR_TYPE.get(err_type, TVMError)(py_err_msg) + + else: + # TVMGetLastPythonError returns a PyObject*, with NULL when + # there is no such value. If we annotated the restype as + # ctypes.py_object, we would need to return Py_None from the + # C++ implementation. 
This would require introducing a + # dependency on libpython that we want to avoid when not in a + # Python environment. Therefore, casting the resulting void* + # pointer to PyObject* using ctypes. + py_err = ctypes.cast(ctypes.c_void_p(py_err), ctypes.py_object).value + + tb = py_err.__traceback__ + + # The py_err.__traceback__ only goes from the location thrown + # up to the next FFI handoff. To have the stacktrace also + # include the C++ side, we need to adjust the __traceback__ + # before re-throwing. + backtrace = _LIB.TVMGetLastBacktrace() + if backtrace: + frames = re.split(r"\n\W+\d+:\W+", py_str(backtrace)) + frames = frames[1:] # Skip "Stack trace: " + + for frame in frames: + if " at " in frame: + func_name, frame = frame.split(" at ", 1) + if ":" in frame: + filename, lineno = frame.rsplit(":", 1) + lineno = int(lineno.strip()) + else: + filename = frame + lineno = None + func_name = func_name.strip() + filename = filename.strip() + + tb = _append_traceback_frame(tb, func_name, filename, lineno) + + # Remove stack frames that provide little benefit to + # debugging. These are only removed from the stack frames + # contained within the exception we are re-raising, and not to + # the stack frames that it will continue to collect. + # Therefore, there may still be a single instance of these + # frames in the outermost Python-to-FFI call. + filter_funcs = [ + lambda code: "tvm/_ffi/_ctypes/packed_func.py" in code.co_filename, + lambda code: "tvm/_ffi/base.py" in code.co_filename, + ] + tb = _filter_traceback_frames(tb, filter_funcs) + + py_err = py_err.with_traceback(tb) + + # The exception PyObject may contain a large amount of state, + # including all stack frames that may be inspected in a later + # PDB post-mortem. Therefore, we must make sure to remove the + # underlying PyObject* from the C++ side after we retrieve it. 
+ _LIB.TVMDropLastPythonError() + + raise py_err + + +def check_call(ret): + """Check the return value of C API call + + This function will raise exception when error occurs. + Wrap every API call with this function + + Parameters + ---------- + ret : int + return value from API calls + """ + if ret != 0: + raise_last_ffi_error() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/libinfo.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/libinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..73a0a3e8e73046ccb3e21a1980eb3e3e3ea85ebf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/libinfo.py @@ -0,0 +1,250 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Library information.""" +import os +import sys + + +def split_env_var(env_var, split): + """Splits environment variable string. + + Parameters + ---------- + env_var : str + Name of environment variable. + + split : str + String to split env_var on. + + Returns + ------- + splits : list(string) + If env_var exists, split env_var. Otherwise, empty list. 
+ """ + if os.environ.get(env_var, None): + return [p.strip() for p in os.environ[env_var].split(split)] + return [] + + +def get_dll_directories(): + """Get the possible dll directories""" + # NB: This will either be the source directory (if TVM is run + # inplace) or the install directory (if TVM is installed). + # An installed TVM's curr_path will look something like: + # $PREFIX/lib/python3.6/site-packages/tvm/_ffi + ffi_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__))) + source_dir = os.path.join(ffi_dir, "..", "..", "..") + install_lib_dir = os.path.join(ffi_dir, "..", "..", "..", "..") + + dll_path = [] + + if os.environ.get("TVM_LIBRARY_PATH", None): + dll_path.append(os.environ["TVM_LIBRARY_PATH"]) + + if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"): + dll_path.extend(split_env_var("LD_LIBRARY_PATH", ":")) + dll_path.extend(split_env_var("PATH", ":")) + elif sys.platform.startswith("darwin"): + dll_path.extend(split_env_var("DYLD_LIBRARY_PATH", ":")) + dll_path.extend(split_env_var("PATH", ":")) + elif sys.platform.startswith("win32"): + dll_path.extend(split_env_var("PATH", ";")) + + # Pip lib directory + dll_path.append(os.path.join(ffi_dir, "..")) + # Default cmake build directory + dll_path.append(os.path.join(source_dir, "build")) + dll_path.append(os.path.join(source_dir, "build", "Release")) + # Default make build directory + dll_path.append(os.path.join(source_dir, "lib")) + + dll_path.append(install_lib_dir) + + # use extra TVM_HOME environment for finding libraries. 
+ if os.environ.get("TVM_HOME", None): + tvm_source_home_dir = os.environ["TVM_HOME"] + else: + tvm_source_home_dir = source_dir + + if os.path.isdir(tvm_source_home_dir): + dll_path.append(os.path.join(tvm_source_home_dir, "web", "dist", "wasm")) + dll_path.append(os.path.join(tvm_source_home_dir, "web", "dist")) + + dll_path = [os.path.realpath(x) for x in dll_path] + return [x for x in dll_path if os.path.isdir(x)] + + +def find_lib_path(name=None, search_path=None, optional=False): + """Find dynamic library files. + + Parameters + ---------- + name : list of str + List of names to be found. + + Returns + ------- + lib_path : list(string) + List of all found path to the libraries + """ + use_runtime = os.environ.get("TVM_USE_RUNTIME_LIB", False) + dll_path = get_dll_directories() + + if search_path is not None: + if isinstance(search_path, list): + dll_path = dll_path + search_path + else: + dll_path.append(search_path) + + if name is not None: + if isinstance(name, list): + lib_dll_path = [] + for n in name: + lib_dll_path += [os.path.join(p, n) for p in dll_path] + else: + lib_dll_path = [os.path.join(p, name) for p in dll_path] + runtime_dll_path = [] + ext_lib_dll_path = [] + else: + if sys.platform.startswith("win32"): + lib_dll_names = ["libtvm.dll", "tvm.dll"] + runtime_dll_names = ["libtvm_runtime.dll", "tvm_runtime.dll"] + ext_lib_dll_names = [ + "3rdparty/cutlass_fpA_intB_gemm/cutlass_kernels/libfpA_intB_gemm.dll", + "3rdparty/libflash_attn/src/libflash_attn.dll", + ] + elif sys.platform.startswith("darwin"): + lib_dll_names = ["libtvm.dylib"] + runtime_dll_names = ["libtvm_runtime.dylib"] + ext_lib_dll_names = [ + "3rdparty/cutlass_fpA_intB_gemm/cutlass_kernels/libfpA_intB_gemm.dylib", + "3rdparty/libflash_attn/src/libflash_attn.dylib", + ] + else: + lib_dll_names = ["libtvm.so"] + runtime_dll_names = ["libtvm_runtime.so"] + ext_lib_dll_names = [ + "3rdparty/cutlass_fpA_intB_gemm/cutlass_kernels/libfpA_intB_gemm.so", + 
"3rdparty/libflash_attn/src/libflash_attn.so", + ] + + name = lib_dll_names + runtime_dll_names + ext_lib_dll_names + lib_dll_path = [os.path.join(p, name) for name in lib_dll_names for p in dll_path] + runtime_dll_path = [os.path.join(p, name) for name in runtime_dll_names for p in dll_path] + ext_lib_dll_path = [os.path.join(p, name) for name in ext_lib_dll_names for p in dll_path] + if not use_runtime: + # try to find lib_dll_path + lib_found = [p for p in lib_dll_path if os.path.exists(p) and os.path.isfile(p)] + lib_found += [p for p in runtime_dll_path if os.path.exists(p) and os.path.isfile(p)] + lib_found += [p for p in ext_lib_dll_path if os.path.exists(p) and os.path.isfile(p)] + else: + # try to find runtime_dll_path + use_runtime = True + lib_found = [p for p in runtime_dll_path if os.path.exists(p) and os.path.isfile(p)] + + if not lib_found: + if not optional: + message = ( + f"Cannot find libraries: {name}\n" + + "List of candidates:\n" + + "\n".join(lib_dll_path + runtime_dll_path) + ) + raise RuntimeError(message) + return None + + if use_runtime: + sys.stderr.write("Loading runtime library %s... exec only\n" % lib_found[0]) + sys.stderr.flush() + return lib_found + + +def find_include_path(name=None, search_path=None, optional=False): + """Find header files for C compilation. + + Parameters + ---------- + name : list of str + List of directory names to be searched. + + Returns + ------- + include_path : list(string) + List of all found paths to header files. 
+ """ + if os.environ.get("TVM_HOME", None): + source_dir = os.environ["TVM_HOME"] + else: + ffi_dir = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) + for source_dir in ["..", "../..", "../../.."]: + source_dir = os.path.join(ffi_dir, source_dir) + if os.path.isdir(os.path.join(source_dir, "include")): + break + else: + raise AssertionError("Cannot find the source directory given ffi_dir: {ffi_dir}") + third_party_dir = os.path.join(source_dir, "3rdparty") + + header_path = [] + + if os.environ.get("TVM_INCLUDE_PATH", None): + header_path.append(os.environ["TVM_INCLUDE_PATH"]) + + header_path.append(source_dir) + header_path.append(third_party_dir) + + header_path = [os.path.abspath(x) for x in header_path] + if search_path is not None: + if isinstance(search_path, list): + header_path = header_path + search_path + else: + header_path.append(search_path) + if name is not None: + if isinstance(name, list): + tvm_include_path = [] + for n in name: + tvm_include_path += [os.path.join(p, n) for p in header_path] + else: + tvm_include_path = [os.path.join(p, name) for p in header_path] + dlpack_include_path = [] + dmlc_include_path = [] + else: + tvm_include_path = [os.path.join(p, "include") for p in header_path] + dlpack_include_path = [os.path.join(p, "dlpack/include") for p in header_path] + dmlc_include_path = [os.path.join(p, "dmlc-core/include") for p in header_path] + + # try to find include path + include_found = [p for p in tvm_include_path if os.path.exists(p) and os.path.isdir(p)] + include_found += [p for p in dlpack_include_path if os.path.exists(p) and os.path.isdir(p)] + include_found += [p for p in dmlc_include_path if os.path.exists(p) and os.path.isdir(p)] + + if not include_found: + message = ( + "Cannot find the files.\n" + + "List of candidates:\n" + + str("\n".join(tvm_include_path + dlpack_include_path)) + ) + if not optional: + raise RuntimeError(message) + return None + + return include_found + + +# current version +# We use 
the version of the incoming release for code +# that is under development. +# The following line is set by tvm/python/update_version.py +__version__ = "0.17.dev0" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/registry.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..1b6b1dec9accc4d14fcd2f80633111429c040a84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/registry.py @@ -0,0 +1,331 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# pylint: disable=invalid-name, unused-import +"""FFI registry to register function and objects.""" +import sys +import ctypes + +from .base import _LIB, check_call, py_str, c_str, string_types, _FFI_MODE, _RUNTIME_ONLY + +try: + # pylint: disable=wrong-import-position,unused-import + if _FFI_MODE == "ctypes": + raise ImportError() + from ._cy3.core import _register_object, _get_object_type_index + from ._cy3.core import _reg_extension + from ._cy3.core import convert_to_tvm_func, _get_global_func, PackedFuncBase +except (RuntimeError, ImportError) as error: + # pylint: disable=wrong-import-position,unused-import + if _FFI_MODE == "cython": + raise error + from ._ctypes.object import _register_object, _get_object_type_index + from ._ctypes.ndarray import _reg_extension + from ._ctypes.packed_func import convert_to_tvm_func, _get_global_func, PackedFuncBase + + +def register_object(type_key=None): + """register object type. + + Parameters + ---------- + type_key : str or cls + The type key of the node + + Examples + -------- + The following code registers MyObject + using type key "test.MyObject" + + .. code-block:: python + + @tvm.register_object("test.MyObject") + class MyObject(Object): + pass + """ + object_name = type_key if isinstance(type_key, str) else type_key.__name__ + + def register(cls): + """internal register function""" + if hasattr(cls, "_type_index"): + tindex = cls._type_index + else: + tidx = ctypes.c_uint() + if not _RUNTIME_ONLY: + check_call(_LIB.TVMObjectTypeKey2Index(c_str(object_name), ctypes.byref(tidx))) + else: + # directly skip unknown objects during runtime. 
+ ret = _LIB.TVMObjectTypeKey2Index(c_str(object_name), ctypes.byref(tidx)) + if ret != 0: + return cls + tindex = tidx.value + _register_object(tindex, cls) + return cls + + if isinstance(type_key, str): + return register + + return register(type_key) + + +def get_object_type_index(cls): + """ + Get type index of object type + + Parameters + ---------- + cls : type + The object type to get type index for. + + Returns + ------- + type_index : Optional[int] + The type index, or None if type not found in the registry. + """ + return _get_object_type_index(cls) + + +def register_extension(cls, fcreate=None): + """Register a extension class to TVM. + + After the class is registered, the class will be able + to directly pass as Function argument generated by TVM. + + Parameters + ---------- + cls : class + The class object to be registered as extension. + + fcreate : function, optional + The creation function to create a class object given handle value. + + Note + ---- + The registered class is requires one property: _tvm_handle. + + If the registered class is a subclass of NDArray, + it is required to have a class attribute _array_type_code. + Otherwise, it is required to have a class attribute _tvm_tcode. + + - ```_tvm_handle``` returns integer represents the address of the handle. + - ```_tvm_tcode``` or ```_array_type_code``` gives integer represents type + code of the class. + + Returns + ------- + cls : class + The class being registered. + + Example + ------- + The following code registers user defined class + MyTensor to be DLTensor compatible. + + .. 
code-block:: python + + @tvm.register_extension + class MyTensor(object): + _tvm_tcode = tvm.ArgTypeCode.ARRAY_HANDLE + + def __init__(self): + self.handle = _LIB.NewDLTensor() + + @property + def _tvm_handle(self): + return self.handle.value + """ + assert hasattr(cls, "_tvm_tcode") + if fcreate: + raise ValueError("Extension with fcreate is no longer supported") + _reg_extension(cls, fcreate) + return cls + + +def register_func(func_name, f=None, override=False): + """Register global function + + Parameters + ---------- + func_name : str or function + The function name + + f : function, optional + The function to be registered. + + override: boolean optional + Whether override existing entry. + + Returns + ------- + fregister : function + Register function if f is not specified. + + Examples + -------- + The following code registers my_packed_func as global function. + Note that we simply get it back from global function table to invoke + it from python side. However, we can also invoke the same function + from C++ backend, or in the compiled TVM code. + + .. 
code-block:: python + + targs = (10, 10.0, "hello") + @tvm.register_func + def my_packed_func(*args): + assert(tuple(args) == targs) + return 10 + # Get it out from global function table + f = tvm.get_global_func("my_packed_func") + assert isinstance(f, tvm.PackedFunc) + y = f(*targs) + assert y == 10 + """ + if callable(func_name): + f = func_name + func_name = f.__name__ + + if not isinstance(func_name, str): + raise ValueError("expect string function name") + + ioverride = ctypes.c_int(override) + + def register(myf): + """internal register function""" + if not isinstance(myf, PackedFuncBase): + myf = convert_to_tvm_func(myf) + check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride)) + return myf + + if f: + return register(f) + return register + + +def get_global_func(name, allow_missing=False): + """Get a global function by name + + Parameters + ---------- + name : str + The name of the global function + + allow_missing : bool + Whether allow missing function or raise an error. + + Returns + ------- + func : PackedFunc + The function to be returned, None if function is missing. + """ + return _get_global_func(name, allow_missing) + + +def list_global_func_names(): + """Get list of global functions registered. + + Returns + ------- + names : list + List of global functions names. + """ + plist = ctypes.POINTER(ctypes.c_char_p)() + size = ctypes.c_uint() + + check_call(_LIB.TVMFuncListGlobalNames(ctypes.byref(size), ctypes.byref(plist))) + fnames = [] + for i in range(size.value): + fnames.append(py_str(plist[i])) + return fnames + + +def extract_ext_funcs(finit): + """ + Extract the extension PackedFuncs from a C module. 
+ + Parameters + ---------- + finit : ctypes function + a ctypes that takes signature of TVMExtensionDeclarer + + Returns + ------- + fdict : dict of str to Function + The extracted functions + """ + fdict = {} + + def _list(name, func): + fdict[name] = func + + myf = convert_to_tvm_func(_list) + ret = finit(myf.handle) + _ = myf + if ret != 0: + raise RuntimeError("cannot initialize with %s" % finit) + return fdict + + +def remove_global_func(name): + """Remove a global function by name + + Parameters + ---------- + name : str + The name of the global function + """ + check_call(_LIB.TVMFuncRemoveGlobal(c_str(name))) + + +def _get_api(f): + flocal = f + flocal.is_global = True + return flocal + + +def _init_api(namespace, target_module_name=None): + """Initialize api for a given module name + + namespace : str + The namespace of the source registry + + target_module_name : str + The target module name if different from namespace + """ + target_module_name = target_module_name if target_module_name else namespace + if namespace.startswith("tvm."): + _init_api_prefix(target_module_name, namespace[4:]) + else: + _init_api_prefix(target_module_name, namespace) + + +def _init_api_prefix(module_name, prefix): + module = sys.modules[module_name] + + for name in list_global_func_names(): + if not name.startswith(prefix): + continue + + fname = name[len(prefix) + 1 :] + target_module = module + + if fname.find(".") != -1: + continue + f = get_global_func(name) + ff = _get_api(f) + ff.__name__ = fname + ff.__doc__ = "TVM PackedFunc %s. 
" % fname + setattr(target_module, ff.__name__, ff) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/runtime_ctypes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/runtime_ctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..e25d391ce63c6162ad753e503bc37d950f8a36e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/_ffi/runtime_ctypes.py @@ -0,0 +1,666 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Common runtime ctypes.""" +# pylint: disable=invalid-name +import ctypes +import json + +import numpy as np + +try: + import ml_dtypes +except ImportError: + ml_dtypes = None +from .base import _LIB, check_call + +tvm_shape_index_t = ctypes.c_int64 + + +class ArgTypeCode(object): + """Type code used in API calls""" + + INT = 0 + UINT = 1 + FLOAT = 2 + HANDLE = 3 + NULL = 4 + TVM_TYPE = 5 + DLDEVICE = 6 + DLTENSOR_HANDLE = 7 + OBJECT_HANDLE = 8 + MODULE_HANDLE = 9 + PACKED_FUNC_HANDLE = 10 + STR = 11 + BYTES = 12 + NDARRAY_HANDLE = 13 + OBJECT_RVALUE_REF_ARG = 14 + EXT_BEGIN = 15 + + +class TVMByteArray(ctypes.Structure): + """Temp data structure for byte array.""" + + _fields_ = [("data", ctypes.POINTER(ctypes.c_byte)), ("size", ctypes.c_size_t)] + + +class DataTypeCode(object): + """DataType code in DLTensor.""" + + INT = 0 + UINT = 1 + FLOAT = 2 + HANDLE = 3 + BFLOAT = 4 + E4M3Float = 6 + E5M2Float = 7 + + +class DataType(ctypes.Structure): + """TVM datatype structure""" + + _fields_ = [("type_code", ctypes.c_uint8), ("bits", ctypes.c_uint8), ("lanes", ctypes.c_uint16)] + CODE2STR = { + DataTypeCode.INT: "int", + DataTypeCode.UINT: "uint", + DataTypeCode.FLOAT: "float", + DataTypeCode.HANDLE: "handle", + DataTypeCode.BFLOAT: "bfloat", + DataTypeCode.E4M3Float: "e4m3_float", + DataTypeCode.E5M2Float: "e5m2_float", + } + NUMPY2STR = { + np.dtype(np.bool_): "bool", + np.dtype(np.int8): "int8", + np.dtype(np.int16): "int16", + np.dtype(np.int32): "int32", + np.dtype(np.int64): "int64", + np.dtype(np.uint8): "uint8", + np.dtype(np.uint16): "uint16", + np.dtype(np.uint32): "uint32", + np.dtype(np.uint64): "uint64", + np.dtype(np.float16): "float16", + np.dtype(np.float32): "float32", + np.dtype(np.float64): "float64", + } + if hasattr(np, "float_"): + NUMPY2STR[np.dtype(np.float_)] = "float64" + STR2DTYPE = { + "void": {"type_code": DataTypeCode.HANDLE, "bits": 0, "lanes": 0}, + "bool": {"type_code": DataTypeCode.UINT, "bits": 1, "lanes": 1}, + "int8": 
{"type_code": DataTypeCode.INT, "bits": 8, "lanes": 1}, + "int16": {"type_code": DataTypeCode.INT, "bits": 16, "lanes": 1}, + "int32": {"type_code": DataTypeCode.INT, "bits": 32, "lanes": 1}, + "int64": {"type_code": DataTypeCode.INT, "bits": 64, "lanes": 1}, + "uint8": {"type_code": DataTypeCode.UINT, "bits": 8, "lanes": 1}, + "uint16": {"type_code": DataTypeCode.UINT, "bits": 16, "lanes": 1}, + "uint32": {"type_code": DataTypeCode.UINT, "bits": 32, "lanes": 1}, + "uint64": {"type_code": DataTypeCode.UINT, "bits": 64, "lanes": 1}, + "e4m3_float8": {"type_code": DataTypeCode.E4M3Float, "bits": 8, "lanes": 1}, + "e5m2_float8": {"type_code": DataTypeCode.E5M2Float, "bits": 8, "lanes": 1}, + "float16": {"type_code": DataTypeCode.FLOAT, "bits": 16, "lanes": 1}, + "float32": {"type_code": DataTypeCode.FLOAT, "bits": 32, "lanes": 1}, + "float64": {"type_code": DataTypeCode.FLOAT, "bits": 64, "lanes": 1}, + } + + def __init__(self, type_str): + super(DataType, self).__init__() + numpy_str_map = DataType.NUMPY2STR + if type_str in numpy_str_map: + type_str = numpy_str_map[type_str] + elif isinstance(type_str, np.dtype): + type_str = str(type_str) + + assert isinstance(type_str, str) + + str_dtype_map = DataType.STR2DTYPE + if type_str in str_dtype_map: + dtype_map = str_dtype_map[type_str] + self.bits = dtype_map["bits"] + self.type_code = dtype_map["type_code"] + self.lanes = dtype_map["lanes"] + return + + arr = type_str.split("x") + head = arr[0] + if len(arr) == 3: + assert arr[1] == "vscale", f"Invalid data type. 
Expected 'vscale' but got '{arr[1]}'" + self.lanes = ctypes.c_uint16(-int(arr[2])) + elif len(arr) > 1: + self.lanes = ctypes.c_uint16(int(arr[1])) + else: + self.lanes = 1 + bits = 32 + + if head.startswith("int"): + self.type_code = DataTypeCode.INT + head = head[3:] + elif head.startswith("uint"): + self.type_code = DataTypeCode.UINT + head = head[4:] + elif head.startswith("float"): + self.type_code = DataTypeCode.FLOAT + head = head[5:] + elif head.startswith("handle"): + self.type_code = DataTypeCode.HANDLE + bits = 64 + head = "" + elif head.startswith("bfloat"): + self.type_code = DataTypeCode.BFLOAT + head = head[6:] + elif head.startswith("e4m3_float"): + self.type_code = DataTypeCode.E4M3Float + head = head[10:] + elif head.startswith("e5m2_float"): + self.type_code = DataTypeCode.E5M2Float + head = head[10:] + elif head.startswith("custom"): + # pylint: disable=import-outside-toplevel + import tvm.runtime._ffi_api + + low, high = head.find("["), head.find("]") + if not low or not high or low >= high: + raise ValueError("Badly formatted custom type string %s" % type_str) + type_name = head[low + 1 : high] + self.type_code = tvm.runtime._ffi_api._datatype_get_type_code(type_name) + head = head[high + 1 :] + else: + raise ValueError("Do not know how to handle type %s" % type_str) + bits = int(head) if head else bits + self.bits = bits + + def __repr__(self): + # pylint: disable=import-outside-toplevel + if self.bits == 0 and self.lanes == 0: + return "void" + if self.bits == 1 and self.lanes == 1: + return "bool" + if self.type_code in DataType.CODE2STR: + type_name = DataType.CODE2STR[self.type_code] + else: + import tvm.runtime._ffi_api + + type_name = "custom[%s]" % tvm.runtime._ffi_api._datatype_get_type_name(self.type_code) + x = "%s%d" % (type_name, self.bits) + lanes_as_int = ctypes.c_int16(self.lanes).value + if lanes_as_int > 1: + x += "x%d" % self.lanes + elif lanes_as_int < -1: + x += "xvscalex%d" % -lanes_as_int + return x + + def __eq__(self, 
other): + return ( + self.bits == other.bits + and self.type_code == other.type_code + and self.lanes == other.lanes + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def itemsize(self): + """Get the number of bytes of a single element of this data type. When the number of lanes + is greater than 1, the itemsize is the size of the vector type. + + Returns + ------- + itemsize : int + The number of bytes of a single element of this data type + """ + lanes_as_int = ctypes.c_int16(self.lanes).value + if lanes_as_int < 0: + raise ValueError("Cannot determine itemsize for scalable vector types") + return (self.bits * self.lanes + 7) // 8 + + +if ml_dtypes is not None: + DataType.NUMPY2STR[np.dtype(ml_dtypes.bfloat16)] = "bfloat16" + DataType.NUMPY2STR[np.dtype(ml_dtypes.float8_e4m3fn)] = "e4m3_float8" + DataType.NUMPY2STR[np.dtype(ml_dtypes.float8_e5m2)] = "e5m2_float8" + +RPC_SESS_MASK = 128 + + +class Device(ctypes.Structure): + """TVM device strucure. + + Typically constructed using convenience function + :meth:`tvm.runtime.device`. + + Exposes uniform interface to device-specific APIs such as CUDA or + OpenCL. Some properties may return None depending on whether an + API exposes that particular property. + + NOTE! The integer values in MASK2STR and STR2MASK *must* correspond + to the values provided by the DLDeviceType and TVMDeviceExtType enums. 
+ """ + + kDLCPU = 1 + kDLCUDA = 2 + kDLCUDAHost = 3 + kDLOpenCL = 4 + kDLVulkan = 7 + kDLMetal = 8 + kDLVPI = 9 + kDLROCM = 10 + kDLROCMHost = 11 + kDLExtDev = 12 + kDLCUDAManaged = 13 + kDLOneAPI = 14 + kDLWebGPU = 15 + kDLHexagon = 16 + kDLAOCL = 32 + kDLSDAccel = 33 + kOpenGL = 34 + kDLMicroDev = 35 + + _fields_ = [("device_type", ctypes.c_int), ("device_id", ctypes.c_int)] + MASK2STR = { + kDLCPU: "cpu", + kDLCUDA: "cuda", + kDLCUDAHost: "cuda_host", + kDLCUDAManaged: "cuda_managed", + kDLOpenCL: "opencl", + kDLVulkan: "vulkan", + kDLMetal: "metal", + kDLVPI: "vpi", + kDLROCM: "rocm", + kDLROCMHost: "rocm_host", + kDLExtDev: "ext_dev", + kDLOneAPI: "oneapi", + kDLWebGPU: "webgpu", + kDLHexagon: "hexagon", + kDLAOCL: "aocl", + kDLSDAccel: "sdaccel", + kOpenGL: "opengl", + kDLMicroDev: "microdev", + } + + STR2MASK = { + "llvm": kDLCPU, + "stackvm": kDLCPU, + "cpu": kDLCPU, + "c": kDLCPU, + "test": kDLCPU, + "hybrid": kDLCPU, + "composite": kDLCPU, + "cuda": kDLCUDA, + "nvptx": kDLCUDA, + "cl": kDLOpenCL, + "opencl": kDLOpenCL, + "sdaccel": kDLOpenCL, + "aocl": kDLAOCL, + "aocl_sw_emu": kDLAOCL, + "vulkan": kDLVulkan, + "metal": kDLMetal, + "vpi": kDLVPI, + "rocm": kDLROCM, + "hip": kDLROCM, + "ext_dev": kDLExtDev, + "hexagon": kDLHexagon, + "webgpu": kDLWebGPU, + } + + def __init__(self, device_type, device_id): + super(Device, self).__init__() + self.device_type = int(device_type) + self.device_id = device_id + + def _GetDeviceAttr(self, device_type, device_id, attr_id): + """Internal helper function to invoke runtime.GetDeviceAttr""" + # pylint: disable=import-outside-toplevel + import tvm.runtime._ffi_api + + return tvm.runtime._ffi_api.GetDeviceAttr(device_type, device_id, attr_id) + + @property + def exist(self): + """Whether this device exists. + + Returns True if TVM has support for the device, if the + physical device is present, and the device is accessible + through appropriate drivers (e.g. cuda/vulkan). 
+ + Returns + ------- + exist : bool + True if the device exists + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 0) != 0 + + @property + def max_threads_per_block(self): + """Maximum number of threads on each block. + + Returns device value for cuda, metal, rocm, opencl, and vulkan + devices. Returns remote device value for RPC devices. + Returns None for all other devices. + + Returns + ------- + max_threads_per_block : int or None + The number of threads on each block + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 1) + + @property + def warp_size(self): + """Number of threads that execute concurrently. + + Returns device value for cuda, rocm, and vulkan. Returns + 1 for metal and opencl devices, regardless of the physical + device. Returns remote device value for RPC devices. Returns + None for all other devices. + + Returns + ------- + warp_size : int or None + Number of threads that execute concurrently + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 2) + + @property + def max_shared_memory_per_block(self): + """Total amount of shared memory per block in bytes. + + Returns device value for cuda, rocm, opencl, and vulkan. + Returns remote device value for RPC devices. Returns None for + all other devices. + + Returns + ------- + max_shared_memory_per_block : int or None + Total amount of shared memory per block in bytes + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 3) + + @property + def compute_version(self): + """Get compute version number as string. + + Returns maximum API version (e.g. CUDA/OpenCL/Vulkan) + supported by the device. + + Returns device value for cuda, rocm, opencl, and + vulkan. Returns remote device value for RPC devices. Returns + None for all other devices. + + Returns + ------- + version : str or None + The version string in `major.minor` format. 
+ + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 4) + + @property + def device_name(self): + """Return the vendor-specific name of device. + + Returns device value for cuda, rocm, opencl, and vulkan. + Returns remote device value for RPC devices. Returns None for + all other devices. + + Returns + ------- + device_name : str or None + The name of the device. + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 5) + + @property + def max_clock_rate(self): + """Return the max clock frequency of device (kHz). + + Returns device value for cuda, rocm, and opencl. Returns + remote device value for RPC devices. Returns None for all + other devices. + + Returns + ------- + max_clock_rate : int or None + The maximum clock frequency of the device (kHz) + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 6) + + @property + def multi_processor_count(self): + """Return the number of compute units in the device. + + Returns device value for cuda, rocm, and opencl. Returns + remote device value for RPC devices. Returns None for all + other devices. + + Returns + ------- + multi_processor_count : int or None + Thee number of compute units in the device + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 7) + + @property + def max_thread_dimensions(self): + """Return the maximum size of each thread axis + + Returns device value for cuda, rocm, opencl, and vulkan. + Returns remote device value for RPC devices. Returns None for + all other devices. + + Returns + ------- + dims: List of int, or None + The maximum length of threadIdx.x, threadIdx.y, threadIdx.z + + """ + return json.loads(self._GetDeviceAttr(self.device_type, self.device_id, 8)) + + @property + def api_version(self): + """Returns version number of the SDK used to compile TVM. + + For example, CUDA_VERSION for cuda or VK_HEADER_VERSION for + Vulkan. + + Returns device value for cuda, rocm, opencl, and vulkan. 
+ Returns remote device value for RPC devices. Returns None for + all other devices. + + Returns + ------- + version : int or None + The version of the SDK + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 11) + + @property + def driver_version(self): + """Returns version number of the driver + + Returns driver vendor's internal version number. + (e.g. "450.408.256" for nvidia-driver-450) + + Returns device value for opencl and vulkan. Returns remote + device value for RPC devices. Returns None for all other + devices. + + Returns + ------- + version : str or None + The version string in `major.minor.patch` format. + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 12) + + @property + def l2_cache_size_bytes(self): + """Return the size of the device L2 cache in bytes + + Supported devices include CUDA/ROCM/OpenCL. + + Returns + ------- + l2_cache_size_bytes : int or None + The size of the device L2 cache in bytes returned by device runtime API. + Return None if the device does not support this feature. + + Note + ---- + The value returned by opencl's API is smaller than actual device L2 cache size. + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 13) + + @property + def total_global_memory(self): + """Return size of the total global memory. + + Supported devices include CUDA/ROCm/Metal/OpenCL. + + Returns + ------- + total_global_memory : int or None + Return the global memory available on device in bytes. + Return None if the device does not support this feature. + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 14) + + def texture_spatial_limit(self): + """Returns limits for textures by spatial dimensions + + Returns + ------- + limit : int or None + Maximum size of the texture by spatial dimensions + + """ + return self._GetDeviceAttr(self.device_type, self.device_id, 12) + + def create_raw_stream(self): + """Create a new runtime stream at the context. 
+ + User should free the stream after use. + + Returns + ------- + stream : TVMStreamHandle + The created runtime stream. + """ + stream = ctypes.c_void_p() + check_call(_LIB.TVMStreamCreate(self.device_type, self.device_id, ctypes.byref(stream))) + return stream + + def free_raw_stream(self, stream): + """Free a created stream handle. + + Parameters + ---------- + stream : TVMStreamHandle + The stream which should to be released. + """ + check_call(_LIB.TVMStreamFree(self.device_type, self.device_id, stream)) + + def set_raw_stream(self, stream): + """Set a created stream handle. + + Parameters + ---------- + stream : TVMStreamHandle + The stream which should to be set to the device. + """ + check_call(_LIB.TVMSetStream(self.device_type, self.device_id, stream)) + + def sync(self, stream=None): + """Synchronize until jobs finished at the context. + + Parameters + ---------- + stream : TVMStreamHandle + Jobs in this stream should be finished. + """ + check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, stream)) + + def __eq__(self, other): + return ( + isinstance(other, Device) + and self.device_id == other.device_id + and self.device_type == other.device_type + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(str(self)) + + def __repr__(self): + if self.device_type >= RPC_SESS_MASK: + tbl_id = self.device_type / RPC_SESS_MASK - 1 + dev_type = self.device_type % RPC_SESS_MASK + return "remote[%d]:%s(%d)" % (tbl_id, Device.MASK2STR[dev_type], self.device_id) + return "%s(%d)" % (Device.MASK2STR[self.device_type], self.device_id) + + +class TVMArray(ctypes.Structure): + """TVMValue in C API""" + + _fields_ = [ + ("data", ctypes.c_void_p), + ("device", Device), + ("ndim", ctypes.c_int), + ("dtype", DataType), + ("shape", ctypes.POINTER(tvm_shape_index_t)), + ("strides", ctypes.POINTER(tvm_shape_index_t)), + ("byte_offset", ctypes.c_uint64), + ] + + def __str__(self): + shape = [self.shape[i] for i in 
range(self.ndim)] + if self.strides: + strides = [self.strides[i] for i in range(self.ndim)] + else: + strides = [] + + return ( + f"TVMArray(data=0x{self.data:016x}, device={self.device}, " + f"dtype={self.dtype}, shape={shape}, " + f"strides={strides}, byte_offset={self.byte_offset})" + ) + + +class ObjectRValueRef: + """Represent an RValue ref to an object that can be moved. + + Parameters + ---------- + obj : tvm.runtime.Object + The object that this value refers to + """ + + __slots__ = ["obj"] + + def __init__(self, obj): + self.obj = obj + + +TVMArrayHandle = ctypes.POINTER(TVMArray) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..791fed27cb5eab63bb5291079d6a678d7efa8e70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__init__.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Integer bound analysis, simplification and pattern detection.""" + +from .int_set import ( + IntSet, + IntervalSet, + PresburgerSet, + estimate_region_lower_bound, + estimate_region_strict_bound, + estimate_region_upper_bound, +) +from .analyzer import ModularSet, ConstIntBound, Analyzer, ProofStrength, Extension +from .bound import deduce_bound +from .pattern import detect_linear_equation, detect_clip_bound, detect_common_subexpr +from .int_solver import solve_linear_equations, solve_linear_inequalities +from .iter_affine_map import IterMapExpr, IterMark, IterSplitExpr, IterSumExpr +from .iter_affine_map import ( + detect_iter_map, + iter_map_simplify, + normalize_iter_map_to_expr, + normalize_to_iter_sum, + subspace_divide, + inverse_affine_iter_map, +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05fd10d970486c23445eadfb83212ac197603033 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a825d1805d6de253ded5d2994514ceb56db1e157 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/_ffi_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/_ffi_api.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..341199d7674d0dd8202ee283c454957d1822a9fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/_ffi_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/_ffi_api.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/_ffi_api.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..504853dddcfc9541364bbb35c5203038fd294505 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/_ffi_api.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/analyzer.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/analyzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b236e3cd2096953005684e001256da91fd69c920 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/analyzer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/analyzer.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/analyzer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73123379260af41b623eb29420672c4fd9f9b761 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/analyzer.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/bound.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/bound.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..18718a83b5d697a25fe504ccf2cfc9dfb5749f87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/bound.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/bound.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/bound.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..198cac397861a0bf9e345282d3c12547d227798f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/bound.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_set.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46069288e5444a5cb9660815acca41c65e38bcac Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_set.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_set.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_set.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e08d353d4ced202f62429aa9558bdc3cb49835ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_set.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_solver.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_solver.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..668feac0a29176837d836c11faacf9670cbb489f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_solver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_solver.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_solver.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96abe5ad30867cc8e813a29a77feaee09569170e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/int_solver.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/iter_affine_map.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/iter_affine_map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76b3081254736bf57363cbf50cfbf4d3f8cc9362 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/iter_affine_map.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/iter_affine_map.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/iter_affine_map.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bd0a681f9e8ca017fb063475120ae692bf80b25 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/iter_affine_map.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/pattern.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/pattern.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..de2e990fc4fed8a6373fd347ca0109d36c22b024 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/pattern.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/pattern.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/pattern.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be6f9e8e63358473115481d4c273c5f08df5aab Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/__pycache__/pattern.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/_ffi_api.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/_ffi_api.py new file mode 100644 index 0000000000000000000000000000000000000000..c551e5651563177cfa85f235a47c645ffb34c1c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/_ffi_api.py @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""FFI APIs for tvm.arith""" +import tvm._ffi + + +tvm._ffi._init_api("arith", __name__) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/analyzer.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..6a94e3bb0176ee748d0315d6fbe6d3f7b3221ebd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/analyzer.py @@ -0,0 +1,348 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Arithmetic data structure and utility""" +import enum +from typing import Union + +import tvm._ffi +from tvm import tir, ir +from tvm.runtime import Object + +from . 
import _ffi_api + + +class ProofStrength(enum.IntEnum): + """Proof strength of the analysis""" + + DEFAULT = 0 + SYMBOLIC_BOUND = 1 + + +class Extension(enum.Flag): + """Extensions enabled for RewriteSimplifier + + Values should match `RewriteSimplifier::Extensions` + """ + + NoExtensions = 0 + TransitivelyProveInequalities = 1 << 0 + ConvertBooleanToAndOfOrs = 1 << 1 + ApplyConstraintsToBooleanBranches = 1 << 2 + ComparisonOfProductAndSum = 1 << 3 + + +@tvm._ffi.register_object("arith.ModularSet") +class ModularSet(Object): + """Represent range of (coeff * x + base) for x in Z""" + + def __init__(self, coeff, base): + self.__init_handle_by_constructor__(_ffi_api.ModularSet, coeff, base) + + +@tvm._ffi.register_object("arith.ConstIntBound") +class ConstIntBound(Object): + """Represent constant integer bound + + Parameters + ---------- + min_value : int + The minimum value of the bound. + + max_value : int + The maximum value of the bound. + """ + + POS_INF = (1 << 63) - 1 + NEG_INF = -POS_INF + + def __init__(self, min_value, max_value): + self.__init_handle_by_constructor__(_ffi_api.ConstIntBound, min_value, max_value) + + +class ConstraintScope: + """Constraint scope. + + Parameters + ---------- + fenter : function + A function that will be called to create an enter context. + + Note + ---- + Do not create object directly, use Analyzer.constraint_scope + """ + + def __init__(self, fenter): + self._fenter = fenter + self._fexit = None + + def __enter__(self): + self._fexit = self._fenter() + + def __exit__(self, ptype, value, trace): + self._fexit() + + +class Analyzer: + """Integer arithmetic analyzer + + This is a stateful analyzer class that can + be used to perform various symbolic integer analysis. 
+ """ + + def __init__(self): + _mod = _ffi_api.CreateAnalyzer() + self._const_int_bound = _mod("const_int_bound") + self._const_int_bound_update = _mod("const_int_bound_update") + self._bind = _mod("bind") + self._modular_set = _mod("modular_set") + self._simplify = _mod("Simplify") + self._rewrite_simplify = _mod("rewrite_simplify") + self._get_rewrite_simplify_stats = _mod("get_rewrite_simplify_stats") + self._reset_rewrite_simplify_stats = _mod("reset_rewrite_simplify_stats") + self._canonical_simplify = _mod("canonical_simplify") + self._int_set = _mod("int_set") + self._enter_constraint_context = _mod("enter_constraint_context") + self._can_prove_equal = _mod("can_prove_equal") + self._can_prove = _mod("can_prove") + self._get_enabled_extensions = _mod("get_enabled_extensions") + self._set_enabled_extensions = _mod("set_enabled_extensions") + + def const_int_bound(self, expr): + """Find constant integer bound for expr. + + Parameters + ---------- + expr : PrimExpr + The expression. + + Returns + ------- + bound : ConstIntBound + The result bound + """ + return self._const_int_bound(expr) + + def modular_set(self, expr): + """Find a modular set that expr belongs to. + + Parameters + ---------- + expr : PrimExpr + The expression. + + Returns + ------- + result : ModularSet + The result. + """ + return self._modular_set(expr) + + def simplify(self, expr, steps=2): + """Simplify expression via both rewrite and canonicalization. + + Parameters + ---------- + expr : PrimExpr + The expression. + steps : The simplification runs in the order of + rewrite_simplify (step 1) -> canonical_simplify (step 2) -> + rewrite_simplify (step 3) -> canonical_simplify (step 4) -> ... + param steps controls how many steps to run. + Default is 2, i.e., rewrite_simplify + canonical_simplify. + + Returns + ------- + result : Expr + The result. + """ + return self._simplify(expr, steps) + + def rewrite_simplify(self, expr): + """Simplify expression via rewriting rules. 
+ + Parameters + ---------- + expr : PrimExpr + The expression. + + Returns + ------- + result : Expr + The result. + """ + return self._rewrite_simplify(expr) + + @property + def rewrite_simplify_stats(self): + return self._get_rewrite_simplify_stats() + + def reset_rewrite_simplify_stats(self): + self._reset_rewrite_simplify_stats() + + def canonical_simplify(self, expr): + """Simplify expression via canonicalization. + + Parameters + ---------- + expr : PrimExpr + The expression. + + Returns + ------- + result : Expr + The result. + """ + return self._canonical_simplify(expr) + + def int_set(self, expr, dom_map=None): + """Compute a symbolic IntSet that covers expr for all values in dom_map. + + Parameters + ---------- + expr : PrimExpr + The expression. + + dom_map : Optional[Dict[Var, tvm.arith.IntSet]] + The domain for variables to be relaxed. If None, use the domain map defined by bound + variables. + + Returns + ------- + result : IntSet + The result. + """ + return self._int_set(expr, dom_map) + + def can_prove(self, expr, strength=ProofStrength.DEFAULT): + """Check whether we can prove expr to be true. + + Parameters + ---------- + expr : PrimExpr + The expression. + + strength: ProofStrength + The proof strength + + Returns + ------- + result : Expr + The result. + """ + return self._can_prove(expr, strength) + + def bind(self, var: tir.Var, expr: Union[tir.PrimExpr, ir.Range]): + """Bind a variable to the expression. + + Parameters + ---------- + var : tvm.tir.Var + The variable. + + expr : Union[tir.PrimExpr, ir.Range] + The expression or the range to bind to. + """ + return self._bind(var, expr) + + def constraint_scope(self, constraint): + """Create a constraint scope. + + Parameters + ---------- + constraint : PrimExpr + The constraint expression. + + returns + ------- + scope : ConstraintScope + The constraint scope + + Examples + -------- + .. 
code-block:: python + + x = te.var("x") + analyzer = tvm.arith.Analyzer() + with analzyer.constraint_scope(x % 3 == 0): + # constraint in effect + assert analyzer.modular_set(x).coeff == 3 + # constraint no longer in effect + assert analyzer.modular_set(x).coeff != 3 + """ + + def _fenter(): + return self._enter_constraint_context(constraint) + + return ConstraintScope(_fenter) + + def update(self, var, info, override=False): + """Update infomation about var + + Parameters + ---------- + var : tvm.tir.Var + The variable. + + info : tvm.Object + Related information. + + override : bool + Whether allow override. + """ + if isinstance(info, ConstIntBound): + self._const_int_bound_update(var, info, override) + else: + raise TypeError("Do not know how to handle type {}".format(type(info))) + + def can_prove_equal(self, lhs: "PrimExpr", rhs: "PrimExpr"): + """Whether we can prove that lhs == rhs + + Parameters + ---------- + lhs: PrimExpr + The left-hand side of the comparison + + rhs: PrimExpr + The right-hand side of the comparison + + Returns + ------- + result: bool + Whether we can prove that lhs == rhs + """ + return self._can_prove_equal(lhs, rhs) + + @property + def enabled_extensions(self) -> Extension: + """Return the currently enabled extensions""" + value = self._get_enabled_extensions() + return Extension(value) + + @enabled_extensions.setter + def enabled_extensions(self, flags: Union[int, Extension]): + """Enable extensions for the analyzer + + Parameters + ---------- + flags: Union[int,Extension] + + The extensions to enable. 
+ """ + flags = Extension(flags).value + self._set_enabled_extensions(flags) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/bound.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/bound.py new file mode 100644 index 0000000000000000000000000000000000000000..6f4b220a378edb76a3d464290c8efdaf0997a27e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/bound.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Bound deduction.""" +from . import _ffi_api + + +def deduce_bound(var, cond, hint_map, relax_map): + """Deduce the bound of the target variable in the cond. + + Parameters + ---------- + var : Var + The target variable to be deduced. + + cond : PrimExpr + The condition + + hint_map : Map[Var, IntSet] + Domain of variables used to help deduction. + + relax_map : Map[Var, IntSet] + The fomain of the variables to be relaxed + using the provided domain. 
+ """ + return _ffi_api.DeduceBound(var, cond, hint_map, relax_map) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/int_set.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/int_set.py new file mode 100644 index 0000000000000000000000000000000000000000..d38f5e805f39d07db8496e5fb348685c8eadf664 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/int_set.py @@ -0,0 +1,197 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Integer set.""" +import tvm._ffi +from tvm.runtime import Object +from . import _ffi_api + + +class IntSet(Object): + """Represent a set of integer in one dimension.""" + + def is_nothing(self): + """Whether the set represent nothing""" + return _ffi_api.IntSetIsNothing(self) + + def is_everything(self): + """Whether the set represent everything""" + return _ffi_api.IntSetIsEverything(self) + + @staticmethod + def vector(vec): + """Construct an integer set that covers the vector expr + + Parameters + ---------- + vec : PrimExpr + The vector expression. + + Returns + ------- + rset : IntSet + The result set. 
+ """ + return _ffi_api.intset_vector(vec) + + @staticmethod + def single_point(point): + """Construct a point set. + + Parameters + ---------- + point : PrimExpr + The vector expression. + + Returns + ------- + rset : IntSet + The result set. + """ + return _ffi_api.intset_single_point(point) + + +@tvm._ffi.register_object("arith.IntervalSet") +class IntervalSet(IntSet): + """Represent set of continuous interval [min_value, max_value] + + Parameters + ---------- + min_value : PrimExpr + The minimum value in the interval. + + max_value : PrimExpr + The maximum value in the interval. + """ + + def __init__(self, min_value, max_value): + self.__init_handle_by_constructor__(_ffi_api.IntervalSet, min_value, max_value) + + +@tvm._ffi.register_object("arith.PresburgerSet") +class PresburgerSet(IntSet): + """Represent of Presburger Set""" + + def __init__(self): + self.__init_handle_by_constructor__(_ffi_api.PresburgerSet) + + +def estimate_region_lower_bound(region, var_dom, predicate): + """Analyze the region with affine map, given the domain of variables and their predicate + Some subregion may be discarded during the lower-bound analysis. + + Parameters + ---------- + region : List[Range] + The region to be analyzed. + + var_dom : Dict[Var, Range] + The ranges of the variables + + predicate : PrimExpr + The predicate for the affine map + + Returns + ---------- + region_int_set : Optional[List[IntSet]] + None if the detection fails, or an array of IntSets as the result of analysis + """ + return _ffi_api.EstimateRegionLowerBound(region, var_dom, predicate) + + +def estimate_region_strict_bound(region, var_dom, predicate): + """Analyze the region with affine map, given the domain of variables and their predicate + The result should be strict, i.e. no region is discarded or relaxed. + + Parameters + ---------- + region : List[Range] + The region to be analyzed. 
+ + var_dom : Dict[Var, Range] + The ranges of the variables + + predicate : PrimExpr + The predicate for the affine map + + Returns + ---------- + region_int_set : Optional[List[IntSet]] + None if the detection fails, or an array of IntSets as the result of analysis + """ + return _ffi_api.EstimateRegionStrictBound(region, var_dom, predicate) + + +def estimate_region_upper_bound(region, var_dom, predicate): + """Analyze the region with affine map, given the domain of variables and their predicate + Relaxation of the region may be used in upper-bound analysis, + i.e. some extra region may be added to the result. + + Parameters + ---------- + region : List[Range] + The region to be analyzed. + + var_dom : Dict[Var, Range] + The ranges of the variables + + predicate : PrimExpr + The predicate for the affine map + + Returns + ---------- + region_int_set : List[IntSet] + an array of IntSets as the result of analysis + """ + return _ffi_api.EstimateRegionUpperBound(region, var_dom, predicate) + + +def pos_inf(): + """Returns the symbolic positive infinity + + Returns + ---------- + pos_inf : Var + A symbolic var that indicates positive infinity + """ + return _ffi_api.PosInf() + + +def neg_inf(): + """Returns the symbolic positive infinity + + Returns + ---------- + neg_inf : Var + A symbolic var that indicates positive infinity + """ + return _ffi_api.NegInf() + + +def union_lower_bound(sets): + """Create a lower-bound of union set, where some of the segments may be dropped + + Parameters + ---------- + sets : List[IntSet] + The sets to be combined + + Returns + ---------- + union_lower_bound : List[IntSet] + An N-dimensional integer set, the lower bound of the union + """ + return _ffi_api.UnionLowerBound(sets) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/int_solver.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/int_solver.py new file mode 100644 index 
0000000000000000000000000000000000000000..6e8a010eec16b361e9d5524688aa4b5b7d40f97a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/int_solver.py @@ -0,0 +1,180 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""integer constraints data structures and solvers""" +import tvm._ffi +from tvm.runtime import Object +from . import _ffi_api + + +@tvm._ffi.register_object("arith.IntGroupBounds") +class IntGroupBounds(Object): + """Represent integer grouped bounds which are classified into + lower bounds (include), upper bounds (include) and equalities. + + Parameters + ---------- + coef : tvm.ir.PrimExpr + The coefficient. Must be integer type. + coef * var >= lower + coef * var == equal + coef * var >= upper + lower : List[tvm.ir.PrimExpr] + the lower bounds (include) + equal : List[tvm.ir.PrimExpr] + equalities + upper : List[tvm.ir.PrimExpr] + the upper bounds (include) + """ + + def __init__(self, coef, lower, equal, upper): + self.__init_handle_by_constructor__(_ffi_api.IntGroupBounds, coef, lower, equal, upper) + + @staticmethod + def from_range(rng): + """Construct a IntGroupedBounds by Range. 
+ + Parameters + ---------- + rng : tvm.ir.Range + + + Returns + ------- + ret : Range + The constructed range. + """ + return _ffi_api.IntGroupBounds_from_range(rng) + + def find_best_range(self): + """Return the best range from the grouped bounds. + None if (-inf, +inf). + """ + return _ffi_api.IntGroupBounds_FindBestRange(self) + + +@tvm._ffi.register_object("arith.IntConstraints") +class IntConstraints(Object): + """Represent a set of integer constraints including variables, their ranges and + the relations between them (either equations or inequalities) + + Parameters + ---------- + variables : List[tvm.tir.Var] + The variables in the constraints. Must be integers + ranges : Map[tvm.tir.Var, tvm.ir.Range] + The ranges of the variables. + relations : List[tvm.ir.PrimExpr] + The relations between the variables (either equations or inequalities) + """ + + def __init__(self, variables, ranges, relations): + self.__init_handle_by_constructor__(_ffi_api.IntConstraints, variables, ranges, relations) + + +@tvm._ffi.register_object("arith.IntConstraintsTransform") +class IntConstraintsTransform(Object): + """We can have different set of variables to represent the same integer constraints. + For example, the following two constrains are equivalent, + {a + b = 0 | a >= 0, b >= 0} and + {m - n = 0 | m >= 0, n <= 0} + This data structure represents the transformation + between two equivalent integer constraints. 
+ In the above example, + src : {a + b = 0 | a >= 0, b >= 0} + dst : {m - n = 0 | m >= 0, n <= 0} + src_to_dst : {a -> m, b -> -n} + dst_to_src : {m -> a, n -> -b} + + Parameters + ---------- + src : arith.IntConstraints + source integer constraints, e.g., {a + b = 0 | a >= 0, b >= 0} + dst : arith.IntConstraints + integer constraints equivalent to the source, e.g., {m - n = 0 | m >= 0, n <= 0} + src_to_dst : Map[tvm.tir.Var, tvm.ir.PrimExpr] + mapping from variables in the src to the variables in the dst, + e.g., {a -> m, b -> -n} + dst_to_src : Map[tvm.tir.Var, tvm.ir.PrimExpr] + mapping from variables in the dst to the variables in the src, + e.g., {m -> a, n -> -b} + """ + + def __init__(self, src, dst, src_to_dst, dst_to_src): + self.__init_handle_by_constructor__( + _ffi_api.IntConstraintsTransform, src, dst, src_to_dst, dst_to_src + ) + + +def solve_linear_equations(equations, variables=None, ranges=None): + """Solve linear equations. + + Parameters + ---------- + equations: List[tvm.ir.PrimExpr] or IntConstraints + The equations of the variables + variables : Optional[List[tvm.tir.Var]] + The variables in the system. + ranges : Optional[Map[tvm.tir.Var, tvm.ir.Range]] + The ranges of the variables. + + Returns + ------- + int_constraints_transform : IntConstraintsTransform + New integer constraints, with less variables (if the problem is NOT of full rank), + or no variable (if the problem is of full rank), + or an empty integer constraints (if the problem is unsolvable). + It also provides the ranges of the variables in the new system, + as well as inequalities inferred from the problem. + You can get the mapping from the original variables to the solution via + int_constraints_transform.src_to_dst. 
+ """ + if isinstance(equations, IntConstraints): + return _ffi_api.SolveLinearEquations(equations) + return _ffi_api.SolveLinearEquations(variables, ranges, equations) + + +def solve_linear_inequalities(equations, variables=None, ranges=None, deskew_range=False): + """Solve linear inequalities. + + Parameters + ---------- + equations : List[tvm.ir.PrimExpr] or IntConstraints + The inequalities of the variables + variables : Optional[List[tvm.tir.Var]] + The variables in the system. + ranges : Optional[Map[tvm.tir.Var, tvm.ir.Range]] + The ranges of the variables. + deskew_range: Optional[bool] + Whether deskew the result ranges to be started from zero. + Default false. + + Returns + ------- + ret_ranges: IntConstraints or IntConstraintsTransform + The result ranges for each variables. + Constrains that cannot be transformed to Range will be stored in IntConstraints.relations. + If deskew_range is set (=True), the result ranges will be deskewed to be started from zero. + New variables are created accordingly therefore IntConstraintsTransform is returned. + """ + solver = ( + _ffi_api.SolveInequalitiesDeskewRange if deskew_range else _ffi_api.SolveInequalitiesToRange + ) + if isinstance(equations, IntConstraints): + assert variables is None + assert ranges is None + return solver(equations) + return solver(variables, ranges, equations) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/iter_affine_map.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/iter_affine_map.py new file mode 100644 index 0000000000000000000000000000000000000000..f19dd0a1bac92889151c5ee1f3b7f2d03f940a9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/iter_affine_map.py @@ -0,0 +1,333 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" Iterator (quasi)affine mapping patterns.""" +from enum import IntEnum +import tvm._ffi +from tvm.runtime import Object +from tvm.ir import PrimExpr +from . import _ffi_api + + +class IterMapExpr(PrimExpr): + """Base class of all IterMap expressions.""" + + +@tvm._ffi.register_object("arith.IterMark") +class IterMark(Object): + """Mark the source as an iterator in [0, extent). + + Parameters + ---------- + source : PrimExpr. + The source expression. + + extent : PrimExpr + The extent of the iterator. + """ + + def __init__(self, source, extent): + self.__init_handle_by_constructor__(_ffi_api.IterMark, source, extent) + + +@tvm._ffi.register_object("arith.IterSplitExpr") +class IterSplitExpr(IterMapExpr): + """Split of an iterator. + + result = floormod(floordiv(source, lower_factor), extent) * scale + + Parameters + ---------- + source : IterMark + The source marked iterator. + + lower_factor : PrimExpr + The lower factor to split the domain. + + extent : PrimExpr + The extent of the split. + + scale : PrimExpr + Additional scale to the split. 
+ """ + + def __init__(self, source, lower_factor, extent, scale): + self.__init_handle_by_constructor__( + _ffi_api.IterSplitExpr, source, lower_factor, extent, scale + ) + + +@tvm._ffi.register_object("arith.IterSumExpr") +class IterSumExpr(IterMapExpr): + """Fuse multiple iterators by summing them with scaling. + + result = sum(args) + base + + Parameters + ---------- + args : List[IterSplitExpr] + The input to the sum expression. + + base : PrimExpr + The base offset. + """ + + def __init__(self, args, base): + self.__init_handle_by_constructor__(_ffi_api.IterSumExpr, args, base) + + +class IterMapLevel(IntEnum): + """Possible kinds of iter mapping check level.""" + + Bijective = 0 + Surjective = 1 + NoCheck = 3 + + @staticmethod + def from_str(name: str): + """Helper to create level enum from string""" + if name is None: + return IterMapLevel.NoCheck + name = name.lower() + if name == "bijective": + check_level = IterMapLevel.Bijective + elif name == "surjective": + check_level = IterMapLevel.Surjective + elif name == "nocheck": + check_level = IterMapLevel.NoCheck + else: + raise ValueError(f"Unknown check level {name}") + return check_level + + +def detect_iter_map( + indices, + input_iters, + predicate=True, + check_level=IterMapLevel.Surjective, + simplify_trivial_iterators=True, +): + """Detect if indices can be written as mapped iters from input iters + + Parameters + ---------- + indices : List[PrimExpr] + The input indices + + input_iters : Map[Var, Range] + The domain of each input iterators. + + predicate : PrimExpr + The predicate constraints on the input iterators + + check_level : Union[str, IterMapLevel] + Checking level of iteration mapping + + simplify_trivial_iterators: bool + If true, iterators with extent of 1 will be replaced with a + constant value. + + Returns + ------- + results : IterMapResult + The iter map matching result. + The result's .indices is empty array if no match can be found. 
+ + """ + if isinstance(check_level, str): + check_level = IterMapLevel.from_str(check_level) + elif check_level is None: + check_level = IterMapLevel.NoCheck + return _ffi_api.DetectIterMap( + indices, input_iters, predicate, check_level, simplify_trivial_iterators + ) + + +def normalize_to_iter_sum(index, input_iters): + """Normalize expr to iter sum. + + The normalized result ensures that + each scale is in the form of (symbol_prod) * cscale + It will also sort in desc order by cscale then len(symbol_prod). + + Parameters + ---------- + index : PrimExpr + The input index + + input_iters : Map[Var, Range] + The domain of each input iterators. + + Returns + ------- + iter_sum: IterSumExpr + The result iter sum + + Note + ---- + This function does best effort detection, so some undetected + part can go into iter_sum.base + + This function is useful to decide the stride multiplier and + division factor in buffer access patterns. + """ + return _ffi_api.NormalizeToIterSum(index, input_iters) + + +def iter_map_simplify( + indices, + input_iters, + predicate=True, + check_level=IterMapLevel.Surjective, + simplify_trivial_iterators=True, +): + """Simplify the indices using iter map detection. + + Parameters + ---------- + indices : List[PrimExpr] + The input indices + + input_iters : Map[Var, Range] + The domain of each input iterators. + + predicate : PrimExpr + The predicate constraints on the input iterators + + check_level : Union[str, IterMapLevel] + Checking level of iteration mapping + + simplify_trivial_iterators: bool + If true, iterators with extent of 1 will be replaced with a + constant value. + + Returns + ------- + results : IterMapResult + The iter map matching result. + The result's .indices is empty array if no match can be found. 
+ + """ + if isinstance(check_level, str): + check_level = IterMapLevel.from_str(check_level) + elif check_level is None: + check_level = IterMapLevel.NoCheck + return _ffi_api.IterMapSimplify( + indices, input_iters, predicate, check_level, simplify_trivial_iterators + ) + + +def normalize_iter_map_to_expr(expr): + """Given an IterMapExpr, transform it to normal PrimExpr + + Parameters + ---------- + expr : IterMapExpr + the input IterMapExpr + + Returns + ------- + result : PrimExpr + the corresponding normal PrimExpr + """ + return _ffi_api.NormalizeIterMapToExpr(expr) + + +def subspace_divide( + bindings, + input_iters, + sub_iters, + predicate=True, + check_level=IterMapLevel.Surjective, + simplify_trivial_iterators=True, +): + """Detect if bindings can be written as + [a_0*e_0 + b_0 + c_0, a_1*e_1 + b_1, ..., a_n*e_n + b_n] + where a = some-quasi-affine-iter-map(input_iters set_minus sub_iters) + b = some-quasi-affine-iter-map(sub_iters) + c is constant symbols + e is the extent of b + For example, z*12 + y*3 + x + c = (z*4+y)*3 + x + bindings = [z*12 + y*3 + x + c] + input_iters = [z, y, x] + sub_iter = [x] + Then the result will be [a, b] where + a = [z*4 + y] + b = [x] + + Parameters + ---------- + bindings : List[PrimExpr] + The input bindings + + input_iters : Map[Var, Range] + The domain of input iterator, which is the basis of the whole space + + sub_iters : Array[Var] + The subset of input_iters, which is the basis of the subspace + + predicate : PrimExpr + The predicate constraints on the input iterators + + check_level : Union[str, IterMapLevel] + Checking level of iteration mapping + + simplify_trivial_iterators: bool + If true, iterators with extent of 1 will be replaced with a + constant value. + + Returns + ------- + results : List[List[PrimExpr]] + The result list has length len(bindings) + 1 + [0, len(bindings)): The iter map matching result. The inner list is of length 2. + The first expr is the basis of the quotient space. 
+ The second expr is the basis of the subspace. + len(bindings): the predicate of outer space and inner space + Empty array if no match can be found. + """ + if isinstance(check_level, str): + check_level = IterMapLevel.from_str(check_level) + return _ffi_api.SubspaceDivide( + bindings, input_iters, sub_iters, predicate, check_level, simplify_trivial_iterators + ) + + +def inverse_affine_iter_map(iter_map, outputs): + """Apply the inverse of the affine transformation to the outputs. + Similar to the back-propagation, starting from the outputs, it visits the DAG of the expressions + in reverse topology order and applies the inverse of the affine transformation until it reaches + the input. The affine iter map is required to be bijective. + + For example, iter_map = [l0 // 16, l0 % 16], outputs = [output_0, output_1], + the affine transformation specified by `iter_map` will be applied to `outputs` and the result + will be {l0: ((output_0*16) + output_1)}. + + See also :any:`detect_iter_map`. + + Parameters + ---------- + iter_map : List[IterSumExpr] + The bijective affine iter map. + outputs : List[PrimExpr] + The outputs of the affine transformation. + + Returns + ------- + results : Map[Var, PrimExpr] + The map from the input to the transformed result. + """ + return _ffi_api.InverseAffineIterMap(iter_map, outputs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/pattern.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/pattern.py new file mode 100644 index 0000000000000000000000000000000000000000..3c822dc52399bc74a35d6b5fe5c93abbc5250285 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/arith/pattern.py @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Detect common patterns.""" + +from typing import Dict + +from tvm.tir import PrimExpr +from . import _ffi_api + + +def detect_linear_equation(expr, var_list): + """Match `expr = sum_{i=0}^{n-1} var[i] * coeff[i] + coeff[n]` + + Where coeff[i] and base are invariant of var[j] for all i and j. + + Parameters + ---------- + expr : PrimExpr + The expression to be matched. + + var_list : List[tvm.tir.Var] + A list of variables. + + Returns + ------- + coeff : List[PrimExpr] + A list of co-efficients if the match is successful. + An empty list if the match failed. + """ + return _ffi_api.DetectLinearEquation(expr, var_list) + + +def detect_clip_bound(expr, var_list): + """Detect if expression corresponds to clip bound of the vars + + Parameters + ---------- + expr : PrimExpr + The expression to be matched. + + var_list : List[tvm.tir.Var] + A list of variables. + + Returns + ------- + coeff : List[PrimExpr] + `concat([min_value[i], max_value[i]] for i, v in enumerate(var_list))` + An empty list if the match failed. + """ + return _ffi_api.DetectClipBound(expr, var_list) + + +def detect_common_subexpr(expr: PrimExpr, threshold: int) -> Dict[PrimExpr, int]: + """Detect common sub expression which shows up more than a threshold times + + Parameters + ---------- + expr : PrimExpr + The expression to be analyzed. 
+ + threshold : int + The threshold of repeat times that determines a common sub expression + + Returns + ------- + cse_dict : Dict[PrimExpr, int] + The detected common sub expression dict, with sub expression and repeat times + """ + return _ffi_api.DetectCommonSubExpr(expr, threshold) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97ac323662bbaf5723ca1bf3558a60594d6a49ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__init__.py @@ -0,0 +1,80 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-import, redefined-builtin +""" Namespace for TVM Auto-scheduler. """ + +from . 
import ( + compute_dag, + dispatcher, + feature, + loop_state, + measure, + measure_record, + relay_integration, + search_policy, + search_task, + task_scheduler, + utils, + workload_registry, +) + +# Shortcut +from .compute_dag import ( + ComputeDAG, + LayoutRewriteOption, + get_shape_from_rewritten_layout, +) +from .cost_model import RandomModel, XGBModel +from .dispatcher import ApplyHistoryBest, ApplyHistoryBestOrSample, DispatchContext +from .measure import ( + LocalBuilder, + LocalRPCMeasureContext, + LocalRunner, + MeasureInput, + MeasureResult, + RPCRunner, + register_task_input_check_func, +) +from .measure_record import ( + RecordReader, + RecordToFile, + load_best_record, + load_records, + save_records, +) +from .relay_integration import ( + extract_tasks, + is_auto_scheduler_enabled, + remove_index_check, + rewrite_compute_body, + rewrite_tensor_shape, +) +from .search_policy import ( + EmptyPolicy, + PreloadCustomSketchRule, + PreloadMeasuredStates, + SketchPolicy, +) +from .search_task import ( + HardwareParams, + SearchTask, + TuningOptions, + auto_schedule, + create_task, +) +from .task_scheduler import TaskScheduler +from .workload_registry import make_workload_key, register_workload diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d70e9731f4f6eeb84e3d177aa88b52d602f09c8e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/__init__.cpython-38.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..47627a23b90bcbcb3f08ebbef8a4220bec1be768 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/_ffi_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/_ffi_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d13fc722a6ab87e80326d789b76e7f36abe1e941 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/_ffi_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/_ffi_api.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/_ffi_api.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8a005f10c1b245c0c2e98196ccd11ab8eedcb4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/_ffi_api.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/compute_dag.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/compute_dag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64f0da4d3fb2c3a582c4049614eb18a1f4c77d78 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/compute_dag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/compute_dag.cpython-38.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/compute_dag.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c6d3469272a357d770bd208737cf8399c286466 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/compute_dag.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/dispatcher.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1bf1d4f54e3e87b680eea69c12ecd757f5bc0fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/dispatcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/dispatcher.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/dispatcher.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e52f7e50c36e33698f831bdfa04c3e7a4c80ff4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/dispatcher.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/feature.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/feature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dedba73545b47995d80b946a0f59d266615873e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/feature.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/feature.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/feature.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5df3a432d9fc0a1ef426277b2ed2cc78cd1d87f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/feature.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/loop_state.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/loop_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..444d8983a71252e1d88e584c59d16eafe05e27b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/loop_state.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/loop_state.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/loop_state.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63c04f85bd7f4c8fc5f8362c05b755dd709f1ad1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/loop_state.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..524612339682ddcbb8b74865ea5d02e5e467d1a6 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..392c1c72c7120a7eb0e4d91a2da0927cc9d1f1bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure_record.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure_record.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61738c49e3ddf68661b3f4e370c0fc88b67381c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure_record.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure_record.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure_record.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b9bad25cc995721b518f41b1935ea26b20e65d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/measure_record.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/relay_integration.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/relay_integration.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..db585f9801cf3e0c8e20d9626809e2b94eaf3e35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/relay_integration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/relay_integration.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/relay_integration.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b12cd90584ae41be8aeabddad4745c0150156c2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/relay_integration.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a26defb31b4f1491d915775920eeb821ad658eb7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_policy.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_policy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd8d891c188a3341aa1fbcf502ff406b6bc1a3f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_policy.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_task.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_task.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f59c43303c5f528a73ec72bd28747f91b13b014c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_task.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_task.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_task.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5fe1fb84856a9300678472b2a3ab6942a61ed77 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/search_task.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/task_scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/task_scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..360bac7d25a5c4534be638c1abab15bdec52f053 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/task_scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/task_scheduler.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/task_scheduler.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b61ee861bb01fe85aa14287bcab0de7b9c795d9b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/task_scheduler.cpython-38.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf0d915d34eb33d848b1ef5594da274a5a1e0dd9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/utils.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd5df1dbb7274ee7e339fddb9a7295e6e0fcf348 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/utils.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/workload_registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/workload_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6817c134a8db8ccb7e71a8435a400c4bb977a6c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/workload_registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/workload_registry.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/workload_registry.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c9be8c4e51b926d4ea66e4e9be18d53a8bd5c6d Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/__pycache__/workload_registry.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/_ffi_api.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/_ffi_api.py new file mode 100644 index 0000000000000000000000000000000000000000..d7b874f71e0f1c556a9636c790ac91beee106449 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/_ffi_api.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" Register FFI APIs from C++ for the namespace tvm.auto_scheduler. 
""" +import tvm._ffi + + +tvm._ffi._init_api("auto_scheduler", __name__) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/compute_dag.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/compute_dag.py new file mode 100644 index 0000000000000000000000000000000000000000..c212d143f987957cbe78a51baca7a868c5e56299 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/compute_dag.py @@ -0,0 +1,286 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name + +""" The auto-scheduler's computational graph and related program analyses. """ + +import hashlib +import json + +import tvm._ffi +from tvm.runtime import Object +from tvm.runtime._ffi_node_api import LoadJSON, SaveJSON + +from . import _ffi_api +from .loop_state import State, StateObject +from .utils import get_const_tuple +from .workload_registry import workload_key_to_tensors + + +class LayoutRewriteOption: + """ + Options for applying layout rewrite. 
+ + The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone op, + and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a network. + """ + + # Do not perform layout rewrite + NO_REWRITE = 0 + # Insert layout transformation stages for input placeholders in the compute DAG + INSERT_TRANSFORM_STAGE = 1 + # Do not insert layout transformation stages and assume the input placeholders + # are pre-transformed. + # Note: The lowered function with this option does not accept the origial input shapes, + # so this option must be used along with `AutoSchedulerLayoutRewrite` pass in Relay. + REWRITE_FOR_PRE_TRANSFORMED = 2 + + @staticmethod + def get_target_default(target, in_relay_integration=False): + """Get the default layout rewrite option for the specified target. + Currently we only enable layout rewrite for cpu / mali backend for now + + Parameters + ---------- + target: tvm.target.Target + The compilation target. + in_relay_integration: bool + If this check is ask for relay integration. + + Returns + ------- + layout_rewrite_option: LayoutRewriteOption + The default layout rewrite option for the specified target. + """ + layout_rewrite_option = LayoutRewriteOption.NO_REWRITE + if target.kind.name == "llvm" or ( + "device" in target.attrs and target.attrs["device"] == "mali" + ): + layout_rewrite_option = ( + LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED + if in_relay_integration + else LayoutRewriteOption.INSERT_TRANSFORM_STAGE + ) + + return layout_rewrite_option + + +@tvm._ffi.register_object("auto_scheduler.ComputeDAG") +class ComputeDAG(Object): + """ + The auto-scheduler's computational graph and related program analyses. + + We convert a compute declaration described by `tvm.compute` (could be a single operator or a + subgraph) to a ComputeDAG. It keeps the input/output tensors, all operations in the DAG, and + some static analysis results for the DAG (e.g. 
the total float operation count, + consumer/producer relations of operations, whether an operation stage should + be tiled/compute inlined). + These analyses can help the search policy to make decisions during the search. + ComputeDAG is also responsible for the interaction between auto-scheduler's `LoopState` and + TVM schedule (e.g. applying the `LoopState` transform steps to a TVM schedule, providing + `LoopState` with extra information got from TVM schedule). + + Parameters + ---------- + compute : Union[List[Tensor], str, tvm.te.Schedule] + Input/output tensors or workload key for a compute declaration. + """ + + def __init__(self, compute_or_sche): + if isinstance(compute_or_sche, str): + compute = workload_key_to_tensors(compute_or_sche) + sche = None + elif isinstance(compute_or_sche, (list, tvm.ir.container.Array)): + for item in compute_or_sche: + if not isinstance(item, tvm.te.Tensor): + raise ValueError( + "The input of ComputeDAG should be a list of Tensor, but got %s" + % type(item) + ) + compute = compute_or_sche + sche = None + elif isinstance(compute_or_sche, tvm.te.Schedule): + compute = None + sche = compute_or_sche + else: + raise ValueError( + "Invalid compute type: %s. ComputeDAG expects string, list of Tensor, or Schedule" + % type(compute_or_sche) + ) + self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, compute, sche) + + def get_init_state(self): + """Get the init state of this ComputeDAG. + + Returns + ------- + state : State + The initial State without any transform steps. + """ + return State(self.init_state, self) + + def apply_steps_from_state(self, state, layout_rewrite=LayoutRewriteOption.NO_REWRITE): + """ + Apply the history transform steps from a State to get a TVM schedule. + + Parameters + ---------- + state : Union[State, StateObject] + The state from which we get transform steps. 
+ + layout_rewrite: LayoutRewriteOption = NoRewrite + Rewrite the layout of placeholders specified by "layout_free_placeholders" attr + to make it most friendly for the generated schedule to read from. + + Returns + ------- + A `te.schedule` and the a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`. + """ + state_obj = state if isinstance(state, StateObject) else state.state_object + return _ffi_api.ComputeDAGApplyStepsFromState(self, state_obj, layout_rewrite) + + def print_python_code_from_state(self, state): + """ + Print transform steps in the history of a State as TVM's python schedule code. + + This is used to print transformation steps for debugging. + Use `apply_steps_from_state` if you want to get a schedule for code generation. + + Parameters + ---------- + state : Union[State, StateObject] + The state from which we get transform steps. + + Returns + ------- + str : Str + The Python schedule code. + """ + state_obj = state if isinstance(state, StateObject) else state.state_object + return _ffi_api.ComputeDAGPrintPythonCodeFromState(self, state_obj) + + def infer_bound_from_state(self, state): + """ + Infer and fill the bound of all iterators of a state. + + The states may lose complete bound information after some transform steps + (e.g., compute_at). + We can call this function to infer and fill all the bound information. + This function calls TVM InferBound pass internally to get the bound. + The returned state of this function is guaranteed to have complete iterator extent + information. + + Parameters + ---------- + state : Union[State, StateObject] + The state from which we get transform steps. + + Returns + ------- + updated_state : State + The State with complete bound information. 
+ """ + state_obj = state if isinstance(state, StateObject) else state.state_object + updated_state = State(_ffi_api.ComputeDAGInferBoundFromState(self, state_obj), self) + # Copy the stage_id_map from the original state to make sure the old indices are still + # valid + if isinstance(state, State): + for k, v in state.stage_id_map.items(): + updated_state.stage_id_map[k] = v + return updated_state + + def rewrite_layout_from_state(self, state): + """ + Rewrite the layout of the DAG according to the history transform steps of a state. + + Parameters + ---------- + state : Union[State, StateObject] + The state from which we get transform steps. + + Returns + ------- + updated_dag : ComputeDAG + The compute dag with rewritten layout. + """ + state_obj = state if isinstance(state, StateObject) else state.state_object + return _ffi_api.ComputeDAGRewriteLayoutFromState(self, state_obj) + + def workload_key(self): + """Return the workload key of this compute DAG. + The workload key is a JSON string from a tuple of (hash of DAG, tensor shapes...) + + Returns + ------- + key: str + The workload key of this compute DAG + """ + str_dag = _ffi_api.ComputeDAGPrintDAG(self, True) + hash_func = tvm._ffi.get_global_func( + "auto_scheduler.compute_dag.hash_func", allow_missing=True + ) + + if hash_func is None: + str_dag = str_dag.encode("utf-8") + hash_key = hashlib.md5(str_dag).hexdigest() + else: + hash_key = hash_func(str_dag) + + io_shapes = [] + for tensor in self.tensors: + io_shapes.append(get_const_tuple(tensor.shape)) + return json.dumps([hash_key] + io_shapes) + + def __str__(self): + # pretty print + MAX_LINE_WIDTH = 256 + + raw_lines = super().__str__().split("\n") + lines = [] + for line in raw_lines: + if len(line) > MAX_LINE_WIDTH: + line = ( + line[: MAX_LINE_WIDTH // 2] + " ..(OMITTED).. 
" + line[-MAX_LINE_WIDTH // 2 :] + ) + lines.append(line) + return "\n".join(lines) + + def __getstate__(self): + return {"tensors": SaveJSON(self.tensors)} + + def __setstate__(self, state): + # Since we always use tensors to recover the ComputeDAG, we do not support + # (de)serialization of the ComputeDAG constructed by a schedule. + self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, LoadJSON(state["tensors"]), None) + + +def get_shape_from_rewritten_layout(rewritten_layout, axis_names): + """Get the orginal shape from a rewritten layout string. + + Parameters + ---------- + rewritten_layout: str + The layout after rewrite + axis_names: List[str] + Specify the order of axes by names + + Returns + ------- + shape: List[PrimExpr] + The original shape + """ + return _ffi_api.GetShapeFromRewrittenLayout(rewritten_layout, axis_names) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56e4a5f9128b3368e121e4e78f4a4dd65f431166 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__init__.py @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-import, redefined-builtin +""" Cost model that estimates the performance of programs """ + +from .cost_model import RandomModel +from .xgb_model import XGBModel diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52766f50f3dbd4651b17b961f9267b98fc695155 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..181066388c88df04af58743cfd9e9ff59a98d788 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/cost_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/cost_model.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0bda04ac8ea3d0da4ccd5317ec1b53e3a6e252d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/cost_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/cost_model.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/cost_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..115486bd85c2f1e30c0e855bf4904cb07b35f46c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/cost_model.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/xgb_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/xgb_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3674f451ba26dc5c6f5962eeea9d89148949fd9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/xgb_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/xgb_model.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/xgb_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88d5c0502f6569db96f74c2807ef3258da185ad3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/__pycache__/xgb_model.cpython-38.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/cost_model.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/cost_model.py new file mode 100644 index 0000000000000000000000000000000000000000..9ef4bcac7a99733d1d3f52e9bd8c72ac9541d5d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/cost_model.py @@ -0,0 +1,174 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" Cost models that estimate the performance of programs """ +import ctypes +import numpy as np + +import tvm._ffi +from tvm.runtime import Object +from .. import _ffi_api + + +@tvm._ffi.register_object("auto_scheduler.CostModel") +class CostModel(Object): + """The base class for cost model""" + + +@tvm._ffi.register_object("auto_scheduler.RandomModel") +class RandomModel(CostModel): + """A model that returns random estimation for all inputs""" + + def __init__(self): + self.__init_handle_by_constructor__(_ffi_api.RandomModel) + + def update(self, inputs, results): + """Update the cost model according to new measurement results (training data). 
+ + Parameters + ---------- + inputs : List[auto_scheduler.measure.MeasureInput] + The measurement inputs + results : List[auto_scheduler.measure.MeasureResult] + The measurement results + """ + _ffi_api.CostModelUpdate(self, inputs, results) + + def predict(self, search_task, states): + """Predict the scores of states + + Parameters + ---------- + search_task : SearchTask + The search task of states + states : List[State] + The input states + + Returns + ------- + scores: List[float] + The predicted scores for all states + """ + return [x.value for x in _ffi_api.CostModelPredict(self, search_task, states)] + + +@tvm._ffi.register_func("auto_scheduler.cost_model.random_fill_float") +def random_fill_float(size, return_ptr): + """Fills a c++ float array with random numbers in [0, 1] + + Parameters + ---------- + size: int + The size of the array + return_ptr: + A pointer to a c++ float array + """ + if size == 0: + return + return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float)) + array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(size,)) + array_wrapper[:] = np.random.uniform(0, 1, (size,)) + + +@tvm._ffi.register_object("auto_scheduler.PythonBasedModel") +class PythonBasedModel(CostModel): + """Base class for cost models implemented in python""" + + def __init__(self): + def update_func(inputs, results): + self.update(inputs, results) + + def predict_func(task, states, return_ptr): + return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float)) + array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(len(states),)) + array_wrapper[:] = self.predict(task, states) + + def predict_stage_func(task, states, return_ptr): + ret = self.predict_stages(task, states) + return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float)) + array_wrapper = np.ctypeslib.as_array(return_ptr, shape=ret.shape) + array_wrapper[:] = ret + + self.__init_handle_by_constructor__( + _ffi_api.PythonBasedModel, update_func, predict_func, predict_stage_func + ) + 
+ def update(self, inputs, results): + """Update the cost model according to new measurement results (training data). + + Parameters + ---------- + inputs : List[auto_scheduler.measure.MeasureInput] + The measurement inputs + results : List[auto_scheduler.measure.MeasureResult] + The measurement results + """ + raise NotImplementedError + + def predict(self, task, states): + """Predict the scores of states + + Parameters + ---------- + search_task : SearchTask + The search task of states + states : List[State] + The input states + + Returns + ------- + scores: List[float] + The predicted scores for all states + """ + raise NotImplementedError + + def predict_stages(self, task, states): + """Predict the scores of all stages in states. This is the breakdown version of `predict`. + + Parameters + ---------- + search_task : SearchTask + The search task of states + states : List[State] + The input states + + Returns + ------- + scores: List[float] + The predicted scores for all stages in all states in the packed format + + Note + ---- + For faster data copy between c++ and python, the python part returns scores in a + single flatten array using a packed format. The c++ part then unpacks the flatten array. + + The packed format is: + { + float scores[N]; // scores[i] is the score for states[i]. + int n_stage_0; // the number of stages in states[0] + float stage_scores_0[[n_stage_0] // the scores for all stages in states[0] + int n_stage_1; // the number of stages in states[1] + float stage_scores_1[n_stage_1]; // the scores for all stages in states[1] + ... + int n_stage_i; // the number of stages in states[i] + float stage_scores_1[n_stage_i]; // the scores for all stages in states[i] + ... // until i == N - 1 + } + To implement this format, we also store int as float, so we can store all numbers + into a single float array. 
+ """ + raise NotImplementedError diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/xgb_model.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/xgb_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c7cdb15634e19d218d186d598af1a7be08b8863e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/cost_model/xgb_model.py @@ -0,0 +1,683 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name + +"""Cost model based on xgboost""" +import multiprocessing +import logging +from typing import Dict +from collections import defaultdict + +import numpy as np + +from tvm.autotvm.tuner.metric import max_curve +from .cost_model import PythonBasedModel +from ..feature import get_per_store_features_from_measure_pairs, get_per_store_features_from_states +from ..measure_record import RecordReader + +try: + from xgboost.callback import TrainingCallback # type: ignore +except ImportError: + + class TrainingCallback: # type: ignore + pass + + +xgb = None + +logger = logging.getLogger("auto_scheduler") + + +class XGBDMatrixContext: + """A global context to hold additional attributes of xgb.DMatrix""" + + def __init__(self): + self.context_dict = defaultdict(dict) + + def get(self, key, matrix, default=None): + """ + Get an attribute of a xgb.DMatrix + Parameters + ---------- + key: str + The name of the attribute + matrix: xgb.DMatrix + The matrix + default: Optional[Any] + The default value if the item does not exist + """ + return self.context_dict[key].get(matrix.handle.value, default) + + def set(self, key, matrix, value): + """ + Set an attribute for a xgb.DMatrix + Parameters + ---------- + key: str + The name of the attribute + matrix: xgb.DMatrix + The matrix + value: Optional[Any] + The new value + """ + self.context_dict[key][matrix.handle.value] = value + + +dmatrix_context = XGBDMatrixContext() + + +class XGBModel(PythonBasedModel): + """Train a XGBoost model to predict the normalized throughputs of programs. + Let the normalized throughput be the score of a program (higher is better). We predict + the (approximate) score of a program = the sum of the scores of all stages in this program. + i.e. score(P) = score_s0 + score_s1 + ... + score_sn, + where score_si is the score of Stage i in Program P. + We extract feature for each stage and let the xgboost predict the score for each stage. 
+ We then sum up the predictions as the score of the whole program. + We use RMSE as the loss function. i.e. loss(P, y) = 1/2 * (score(P) - y)^2, + where P is the program and y is the normalized throughput according to + the ground truth (measurement). + XGBoost does not support this loss function because `score(P)` is a sum of the prediction + of several samples, so we implemented a custom loss function and call it pack-sum-rmse. + It is called "pack-sum" because we combine several samples into a "pack" and sum up + their predictions. + + Parameters + ---------- + verbose_eval: int = 25 + Print training log every `verbose_eval` iterations. + num_warmup_sample: int = 100 + The minimum number of samples to start to use the trained model. + If the number of samples is less than this number, the model outputs random predictions. + seed: Optional[int] + The random seed + model_file: Optional[str] + If is not None, save model to this file after every update. + adaptive_training: bool = False + Whether to use adaptive training, which reduces the training frequency when there are + too many logs. + """ + + def __init__( + self, + verbose_eval=25, + num_warmup_sample=100, + seed=None, + model_file=None, + adaptive_training=False, + ): + global xgb + try: + if xgb is None: + xgb = __import__("xgboost") + except ImportError: + # add "from Node" to silence + # "During handling of the above exception, another exception occurred" + raise ImportError( + "XGBoost is required for XGBModel. " + "Please install its python package first. 
" + "Help: (https://xgboost.readthedocs.io/en/latest/) " + ) from None + + self.xgb_params = { + "max_depth": 10, + "gamma": 0.001, + "min_child_weight": 0, + "eta": 0.2, + # todo(merrymercy): automatically decrease learning rate when the loss is too large + "n_gpus": 0, + "nthread": multiprocessing.cpu_count() // 2, + "verbosity": 0, + "seed": seed or 43, + "disable_default_eval_metric": 1, + } + self.bst = None + self.plan_size = 32 + self.num_warmup_sample = num_warmup_sample + self.verbose_eval = verbose_eval + self.model_file = model_file + self.adaptive_training = adaptive_training + + super().__init__() + + # cache measurement input/result pairs and extracted features + self.inputs = [] + self.results = [] + self.last_train_length = 0 + self.inputs_feature_cache = [] + + def update(self, inputs, results): + """Update the cost model according to new measurement results (training data). + XGBoost does not support incremental training, so we re-train a new model every time. + Parameters + ---------- + inputs : List[MeasureInput] + The measurement inputs + results : List[MeasureResult] + The measurement results + """ + if len(inputs) <= 0: + return + assert len(inputs) == len(results) + + self.inputs.extend(inputs) + self.results.extend(results) + + if ( + self.adaptive_training + and len(self.inputs) - self.last_train_length < self.last_train_length / 5 + ): + # Set a training threshold related to `last_train_length` to reduce the training + # overhead when there're too many logs + return + self.last_train_length = len(self.inputs) + + # extract feature + n_cached = len(self.inputs_feature_cache) + features, normalized_throughputs, task_ids = get_per_store_features_from_measure_pairs( + self.inputs, self.results, skip_first_n_feature_extraction=n_cached + ) + if n_cached > 0: + features = list(features) + features[:n_cached] = self.inputs_feature_cache + features = np.array(features, dtype=object) + self.inputs_feature_cache = features + dtrain = 
pack_sum_xgbmatrix( + features, normalized_throughputs, task_ids, normalized_throughputs + ) + + # train xgb model + self.bst = xgb.train( + self.xgb_params, + dtrain, + num_boost_round=10000, + obj=pack_sum_square_error, + callbacks=[ + CustomCallback( + stopping_rounds=50, + metric="tr-p-rmse", + fevals=[pack_sum_rmse, pack_sum_average_peak_score(self.plan_size)], + evals=[(dtrain, "tr")], + maximize=False, + verbose_eval=self.verbose_eval, + ) + ], + ) + + # Update the model file if it has been set + if self.model_file: + self.save(self.model_file) + + def predict(self, task, states): + """Predict the scores of states + Parameters + ---------- + search_task : SearchTask + The search task of states + statse : List[State] + The input states + Returns + ------- + scores: List[float] + The predicted scores for all states + """ + features = get_per_store_features_from_states(states, task) + if self.bst is not None and len(self.inputs) > self.num_warmup_sample: + dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features) + raw_preds = self.bst.predict(dtest) + ret = predict_throughput_pack_sum(raw_preds, pack_ids) + else: + ret = np.random.uniform(0, 1, (len(states),)) + + # Predict -inf for invalid states that failed to be lowered. + for idx, feature in enumerate(features): + if feature.min() == feature.max() == 0: + ret[idx] = float("-inf") + + return ret + + def predict_stages(self, task, states): + """Predict the scores of all stages in states. This is the breakdown version of `predict`. + + Parameters + ---------- + search_task : SearchTask + The search task of states + statse : List[State] + The input states + + Returns + ------- + scores: List[float] + The predicted scores for all stages in all states in the packed format + + Note + ---- + For faster data copy between c++ and python, the python part returns scores in a + single flatten array using a packed format. The c++ part then unpacks the flatten array. 
+ The packed format is: + { + + float scores[N]; // scores[i] is the score for states[i]. + int n_stage_0; // the number of stages in states[0] + float stage_scores_0[[n_stage_0] // the scores for all stages in states[0] + int n_stage_1; // the number of stages in states[1] + float stage_scores_1[n_stage_1]; // the scores for all stages in states[1] + ... + int n_stage_i; // the number of stages in states[i] + float stage_scores_1[n_stage_i]; // the scores for all stages in states[i] + ... // untill i == N - 1 + + } + To implement this format, we also store int as float, so we can store all numbers + into a single float array. + """ + features = get_per_store_features_from_states(states, task) + if self.bst is not None and len(self.inputs) > self.num_warmup_sample: + dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features) + raw_preds = self.bst.predict(dtest) + breakdown = predict_throughput_pack_sum(raw_preds, pack_ids) + stage_scores = [[] for _ in range(len(states))] + for pred, pack_id in zip(raw_preds, pack_ids): + stage_scores[pack_id].append(pred) + for idx, stage_score in enumerate(stage_scores): + breakdown = np.append(breakdown, len(stage_score)) + breakdown = np.concatenate((breakdown, np.array(stage_score))) + else: + breakdown = np.concatenate( + (np.random.uniform(0, 1, (len(states),)), np.zeros(len(states))) + ) + + # Predict 0 for invalid states that failed to be lowered. + for idx, feature in enumerate(features): + if feature.min() == feature.max() == 0: + breakdown[idx] = float("-inf") + + return breakdown + + def update_from_file(self, file_name, n_lines=None): + """Load measure records from a log file to update the cost model. + This function can be used to pre-train the cost model with history log files. 
+ Parameters + ---------- + file_name: str + The filename + n_lines: Optional[int] + Only load first n lines of the log file + """ + inputs, results = RecordReader(file_name).read_lines(n_lines) + logger.info("XGBModel: Loaded %s measurement records from %s", len(inputs), file_name) + self.update(inputs, results) + + def save(self, file_name: str): + """Save the model to a file + Parameters + ---------- + file_name: str + The filename + """ + self.bst.save_model(file_name) + + def load(self, file_name: str): + """Load the model from a file + Parameters + ---------- + file_name: str + The filename + """ + if self.bst is None: + self.bst = xgb.Booster(self.xgb_params) + self.bst.load_model(file_name) + self.num_warmup_sample = -1 + + +def feature_to_pack_sum_xgbmatrix(xs): + """Convert an extracted multi-stage feature vector to a xgbmatrx in pack-sum format + Parameters + ---------- + xs: np.ndarray + The feature vector + Returns + ------- + dmatrix: xgb.DMatrix + The DMatrix + pack_ids: List[int] + pack ids information + """ + x_flatten = [] + pack_ids = [] + + for ct, x in enumerate(xs): + for row in x: + x_flatten.append(row) + pack_ids.append(ct) + + return xgb.DMatrix(np.array(x_flatten)), pack_ids + + +def pack_sum_xgbmatrix(xs, ys, gids=None, weights=None): + """Convert (feature, label) pairs into a xgb matrix with pack-sum format + Parameters + ---------- + xs: np.ndarray + The feature vector + ys: np.ndarray + The normaizlied throughput + gids: Optional[List[int]] + Group id (task id) + weights: Optional[np.ndarray] + The weight of samples + Returns + ------- + dmatrix: xgb.DMatrix + The DMatrix with pack-sum information + """ + if gids is not None: + # sort by group + indices = gids.argsort() + xs, ys = xs[indices], ys[indices] + group_sizes = np.bincount(gids) + if weights is not None: + weights = weights[indices] + else: + # assume it has only one group + group_sizes = [len(xs)] + + x_flatten = [] + y_flatten = [] + weights_flatten = [] + pack_ids = [] + 
+ if weights is not None: + for ct, (x, y, w) in enumerate(zip(xs, ys, weights)): + for row in x: + x_flatten.append(row) + y_flatten.append(y) + weights_flatten.append(w) + pack_ids.append(ct) + else: + for ct, (x, y) in enumerate(zip(xs, ys)): + for row in x: + x_flatten.append(row) + y_flatten.append(y) + pack_ids.append(ct) + + ret = xgb.DMatrix(np.array(x_flatten), y_flatten) + if weights is not None: + ret.set_weight(weights_flatten) + dmatrix_context.set("pack_ids", ret, np.array(pack_ids)) + dmatrix_context.set("group_sizes", ret, group_sizes) + return ret + + +def predict_throughput_pack_sum(raw_preds, pack_ids): + """Predict the throughputs for predictions in pack-sum format + Parameters + ---------- + raw_preds: np.ndarray + The raw predictions + pack_ids: List[int] + The pack id for predictions + Returns + ------- + throughputs: np.ndarray + The throughput + """ + sum_pred = np.bincount(pack_ids, weights=raw_preds) + return sum_pred + + +def pack_sum_square_error(preds, dtrain): + """Implement square error loss on pack-sum format as + a custom objective function for xgboost. 
+ Parameters + ---------- + preds: np.ndarray + The predicitons + dtrain: xgb.DMatrix + The training set + Returns + ------- + gradient: np.ndarray + hessian: np.ndarray + gradient and hessian according to the xgboost format + """ + pack_ids = dmatrix_context.get("pack_ids", dtrain) + weight = dtrain.get_weight() + + sum_pred = np.bincount(pack_ids, weights=preds) + x = sum_pred[pack_ids] + y = dtrain.get_label() + gradient = x - y + hessian = np.ones_like(gradient) + + if len(weight) == 0: + return gradient, hessian + + return gradient * weight, hessian * weight + + +def pack_sum_rmse(raw_preds, labels): + """Evaluate RMSE (rooted mean square error) in the pack-sum format + Parameters + ---------- + raw_preds: np.ndarray + The raw prediction + labels: xgb.DMatrix + The groud-truth label matrix + Returns + ------- + name: str + score: float + The name and score of this metric + """ + pack_ids = dmatrix_context.get("pack_ids", labels) + preds = predict_throughput_pack_sum(raw_preds, pack_ids)[pack_ids] + return "p-rmse", np.sqrt(np.mean(np.square((preds - labels.get_label())))) + + +def pack_sum_average_peak_score(N): + """Return the evaluation function for average-peak-score@N + Parameters + ---------- + N: int + The "N" in "average-peak-score@N" + Returns + ------- + The evaluation function + """ + + def feval(preds, labels): + """Evaluate average-peak-score@N in the pack-sum format + Parameters + ---------- + raw_preds: np.ndarray + The raw prediction + labels: xgb.DMatrix + The groud-truth label matrix + Returns + ------- + name: str + score: float + The name and score of this metric + """ + group_sizes = dmatrix_context.get("group_sizes", labels, [len(preds)]) + pack_ids = dmatrix_context.get("pack_ids", labels) + + preds = predict_throughput_pack_sum(preds, pack_ids) + labels = ( + np.bincount(pack_ids, weights=labels.get_label()) + / np.unique(pack_ids, return_counts=True)[1] + ) + + scores = [] + offset = 0 + for size in group_sizes: + preds_group = 
preds[offset : offset + size] + labels_group = labels[offset : offset + size] + offset += size + + trials = np.argsort(preds_group)[::-1][:N] + trial_scores = labels_group[trials] + curve = max_curve(trial_scores) / np.max(labels_group) + scores.append(np.mean(curve)) + return f"a-peak@{N}", np.mean(scores) + + return feval + + +class XGBoostCallback(TrainingCallback): + """Base class for XGBoost callbacks.""" + + def __call__(self, env: "xgb.core.CallbackEnv"): + # Compatibility with xgboost < 1.3 + return self.after_iteration(env.model, env.iteration, env.evaluation_result_list) + + def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict): + raise NotImplementedError + + +class CustomCallback(XGBoostCallback): + """ + Callback function for xgboost. + Support custom evaluation function and early-stopping. + """ + + def __init__( + self, + stopping_rounds, + metric, + fevals, + evals=(), + log_file=None, + maximize=False, + verbose_eval=True, + skip_every=2, + ): + """Init function""" + self.stopping_rounds = stopping_rounds + self.metric = metric + self.metric_shortname = metric.split("-")[1] + self.fevals = fevals + self.evals = evals + self.log_file = log_file + self.maximize = maximize + self.verbose_eval = verbose_eval + self.skip_every = skip_every + self.state = {} + + def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict): + """Run after each iteration. 
Return True when training should stop.""" + # pylint:disable = import-outside-toplevel + try: + from xgboost.callback import _fmt_metric # type: ignore + except ImportError: + # Compatibility with xgboost >= 1.6 + def _fmt_metric(value, show_stdv=True): + """format metric string""" + if len(value) == 2: + return f"{value[0]}:{value[1]:.5f}" + if len(value) == 3: + if show_stdv: + return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}" + return f"{value[0]}:{value[1]:.5f}" + raise ValueError("wrong metric value", value) + + ##### init state ##### + if not self.state: + self.state["maximize_score"] = self.maximize + self.state["best_iteration"] = 0 + if self.maximize: + self.state["best_score"] = float("-inf") + else: + self.state["best_score"] = float("inf") + + assert model is not None + if model.attr("best_score") is not None: + self.state["best_score"] = float(model.attr("best_score")) + self.state["best_iteration"] = int(model.attr("best_iteration")) + self.state["best_msg"] = model.attr("best_msg") + else: + model.set_attr(best_iteration=str(self.state["best_iteration"])) + model.set_attr(best_score=str(self.state["best_score"])) + res_dict = {} + + if epoch % self.skip_every == 1: + return False + + ##### evaluation ##### + for feval in self.fevals: + bst_eval = model.eval_set(self.evals, epoch, feval) + res = [x.split(":") for x in bst_eval.split()] + for kv in res[1:]: + res_dict[kv[0]] = [float(kv[1])] + + eval_res = [] + keys = list(res_dict.keys()) + keys.sort(key=lambda x: x if self.metric_shortname not in x else "a" + x) + for key in keys: + v = res_dict[key] + eval_res.append([key] + v) + + ##### print eval result ##### + if ( + not isinstance(self.verbose_eval, bool) + and self.verbose_eval + and epoch % self.verbose_eval == 0 + ): + infos = [f"XGB iter: {epoch:3d}"] + for item in eval_res: + if "null" in item[0]: + continue + infos.append(f"{item[0]}: {item[1]:.6f}") + + logger.debug("\t".join(infos)) + if self.log_file: + with open(self.log_file, "a") as 
fout: + fout.write("\t".join(infos) + "\n") + + ##### choose score and do early stopping ##### + score = None + for item in eval_res: + if item[0] == self.metric: + score = item[1] + break + assert score is not None + + best_score = self.state["best_score"] + best_iteration = self.state["best_iteration"] + maximize_score = self.state["maximize_score"] + + if (maximize_score and score > best_score) or (not maximize_score and score < best_score): + msg = f"[{epoch}] " + "\t".join([_fmt_metric(x) for x in eval_res]) + self.state["best_msg"] = msg + self.state["best_score"] = score + self.state["best_iteration"] = epoch + # save the property to attributes, so they will occur in checkpoint. + if model is not None: + model.set_attr( + best_score=str(self.state["best_score"]), + best_iteration=str(self.state["best_iteration"]), + best_msg=self.state["best_msg"], + ) + elif epoch - best_iteration >= self.stopping_rounds: + best_msg = self.state["best_msg"] + if self.verbose_eval: + logger.debug("XGB stopped. Best iteration: %s ", best_msg) + return True + + return False diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/dispatcher.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..3384850502c793924a2ac8912dc79f9a309a0981 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/dispatcher.py @@ -0,0 +1,467 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +The global context that dispatches best schedules to workloads. + +In auto-scheduler, a state (loop_state.py::StateObject) saves the +schedule configuration by its transform_steps, so a state is used +as a schedule configuration here. +""" +# pylint: disable=invalid-name + +import logging +import pathlib +from collections.abc import Iterable + +import numpy as np + +from tvm.contrib.utils import tempdir +from tvm.tir.expr import FloatImm +from .cost_model import RandomModel, XGBModel +from .measure import LocalRPCMeasureContext +from .measure_record import RecordToFile, load_records +from .search_policy import PreloadMeasuredStates, SketchPolicy +from .search_task import SearchTask, TuningOptions +from .utils import calc_workload_dis_factor, decode_workload_key + +logger = logging.getLogger("auto_scheduler") + + +class DispatchContext(object): + """ + Base class of dispatch context. + """ + + current = None + + def __init__(self): + self._old_ctx = DispatchContext.current + + def query(self, target, workload_key, has_complex_op, dag, func_name): + """ + Query the context to get the specific config for a workload. + If this function cannot find the result inside this context, it will query the result + from the upper contexts. + + Parameters + ---------- + target: Target + The current target + workload_key : str + The workload key + has_complex_op: bool + Whether this workload has at least one complex op. + dag: ComputeDAG + The ComputeDAG of the workload. + func_name: str + The function name of this workload. 
+ + Returns + ------- + state : StateObject + The state that stores schedule configuration for the workload + """ + ret = self._query_inside(target, workload_key, func_name) + if ret is None: + ret = self._old_ctx.query(target, workload_key, has_complex_op, dag, func_name) + return ret + + def update(self, target, workload_key, state): + """ + Update the config for a workload + + Parameters + ---------- + target: Target + The current target + workload_key : str + The current workload_key. + state : StateObject + The state that stores schedule configuration for the workload + """ + raise NotImplementedError() + + def _query_inside(self, target, workload_key, func_name): + """ + Query the context to get the specific config for a workload. + This function only query config inside this context. + + Parameters + ---------- + target: Target + The current target + workload_key : str + The current workload_key. + func_name: str + The function name of this workload. + + Returns + ------- + state : StateObject + The schedule configuration for the workload + """ + raise NotImplementedError() + + def __enter__(self): + self._old_ctx = DispatchContext.current + DispatchContext.current = self + return self + + def __exit__(self, ptype, value, trace): + DispatchContext.current = self._old_ctx + + +class ApplyHistoryBest(DispatchContext): + """ + Apply the history best config + + Parameters + ---------- + records : str, list of str, or iterator of (auto_scheduler.measure.MeasureInput,\ + auto_scheduler.measure.MeasureResult) + Collection of tuning records. + If is str, then it should be the filename of a records log file. + Each row of this file is an encoded record pair. If it is an iterator, + it can either be a set of str filenames which will be applied jointly, + or a set of (input, result) tuples. + n_lines: Optional[int] + if it is not None, only load the first `n_lines` lines of log. + include_compatible: bool + When set to True, compatible records will also be considered. 
+ """ + + def __init__(self, records, n_lines=None, include_compatible=False): + super(ApplyHistoryBest, self).__init__() + self.include_compatible = include_compatible + + # Dict[str (target key), + # Dict[str (workload hash), + # Dict[tuple (workload args), tuple (State, cost)]]] + self.best_by_targetkey = {} + self.best_by_model = {} + self._best_user_defined = {} + + self.load(records, n_lines) + + @staticmethod + def get_workload_entry(best_records, target_key, workload_key): + """Get the entry of the target key and workload key hash in the given best record map. + + Parameters + ---------- + best_records: Dict[str, Dict[str, Dict[str, Any]]] + The best record map. + target_key: str + The first key to the best_records. + workload_key: str + The workload key that can be decoded to workload hash and args. + + Returns + ------- + entry: Dict[str, Any] + The entry in best_records with target key and workload hash. + workload_hash: str + The workload hash decoded from workload_key. + workload_args: Tuple[Any, ...] + The hashable tuple of workload args decoded from workload_key. + """ + workload_hash, workload_args = decode_workload_key(workload_key) + if target_key not in best_records: + best_records[target_key] = {} + if workload_hash not in best_records[target_key]: + best_records[target_key][workload_hash] = {} + return best_records[target_key][workload_hash], workload_hash, workload_args + + def load(self, records, n_lines=None): + """Load records to this dispatch context + + Parameters + ---------- + records : str or iterator of (auto_scheduler.measure.MeasureInput,\ + auto_scheduler.measure.MeasureResult) + Collection of tuning records. + If is str, then it should be the filename of a records log file. + Each row of this file is an encoded record pair. Otherwise, it is an iterator. 
+ n_lines: Optional[int] + if it is not None, only load the first `n_lines` lines of log + """ + joint_records = [] + if not isinstance(records, Iterable) or isinstance(records, str): + records = [records] + + for rec in records: + if isinstance(rec, pathlib.Path): + rec = str(rec) + + if isinstance(rec, str): + rec = load_records(rec) + joint_records += rec + else: + if rec is not None: + joint_records.append(rec) + + if not joint_records: + return + + best_by_targetkey = self.best_by_targetkey + best_by_model = self.best_by_model + + counter = 0 + for inp, res in joint_records: + if n_lines is not None and counter >= n_lines: + break + counter += 1 + if res.error_no != 0: + continue + + costs = [x.value for x in res.costs if isinstance(x, FloatImm)] + cost = np.mean(costs) + + # use target keys in tvm target system as key to build best map + for k in inp.task.target.keys: + entry, _, workload_args = self.get_workload_entry( + best_by_targetkey, k, inp.task.workload_key + ) + if workload_args not in entry: + entry[workload_args] = (inp.state, cost) + else: + _, other_cost = entry[workload_args] + if other_cost > cost: + entry[workload_args] = (inp.state, cost) + + # use model as key to build best map + entry, _, workload_args = self.get_workload_entry( + best_by_model, inp.task.target.model, inp.task.workload_key + ) + if workload_args not in entry: + if inp.task.target.model != "unknown": + entry[workload_args] = (inp.state, cost) + else: + _, other_cost = entry[workload_args] + if other_cost > cost: + entry[workload_args] = (inp.state, cost) + + logger.debug("Finish loading %d records", counter) + + def _query_inside(self, target, workload_key, func_name): + if target is None: + raise RuntimeError( + "Need a target context to find the history best. " + "Hint: If your target is llvm, use `with tvm.target.create('llvm'):`" + " above the dispatcher call. So does other target. 
" + ) + + def match_record(best_records, target_key, workload_key): + """The helper function to match the record in the given map + and return the matched state, or None if no match. + """ + ret = None + + entry, workload_hash, workload_args = self.get_workload_entry( + best_records, target_key, workload_key + ) + if workload_args in entry: + ret = entry[workload_args][0] + elif self.include_compatible: + best_cost = float("inf") + for args, val in entry.items(): + dis_f = calc_workload_dis_factor( + (workload_hash, workload_args), (workload_hash, args) + ) + if dis_f == float("inf"): + continue + + state, cost = val + cost *= dis_f + if ret is None or cost < best_cost: + best_cost = cost + ret = state + return ret + + # first try matching by model + ret = match_record(self._best_user_defined, target.model, workload_key) + if ret is not None: + return ret + ret = match_record(self.best_by_model, target.model, workload_key) + if ret is not None: + return ret + + # then try matching by target key + for k in target.keys: + ret = match_record(self._best_user_defined, k, workload_key) + if ret is not None: + return ret + ret = match_record(self.best_by_targetkey, k, workload_key) + if ret is not None: + return ret + + return None + + def update(self, target, workload_key, state): + entry, _, workload_args = self.get_workload_entry( + self._best_user_defined, target.model, workload_key + ) + entry[workload_args] = (state, 1) + + for k in target.keys: + entry, _, _ = self.get_workload_entry(self._best_user_defined, k, workload_key) + entry[workload_args] = (state, 1) + + +class ApplyHistoryBestOrSample(ApplyHistoryBest): + """ + Apply the history best config, or sample a valid schedule if no config is found. + + Parameters + ---------- + records : str or iterator of (auto_scheduler.measure.MeasureInput,\ + auto_scheduler.measure.MeasureResult) + Collection of tuning records. + If is str, then it should be the filename of a records log file. 
+        Each row of this file is an encoded record pair. Otherwise, it is an iterator.
+    sample_simple_workloads: bool
+        When False, sampling will not apply to simple workloads (w/o reduction).
+    cost_model_file: str
+        The filename of the pre-trained XGBoost cost model. If not present, then random
+        model will be used.
+    num_measure: int
+        Measure the top-N rank of sampled schedules on the device. The default -1 means
+        no measurement and simply return the top-1 schedule ranked by the cost model.
+    """
+
+    def __init__(
+        self, records, sample_simple_workloads=False, cost_model_file=None, num_measure=-1
+    ):
+        self.sample_simple_workloads = sample_simple_workloads
+        self.num_measure = num_measure
+        self.log_dir = tempdir()
+        if cost_model_file is None:
+            self.cost_model = RandomModel()
+        else:
+            self.cost_model = XGBModel()
+            self.cost_model.load(cost_model_file)
+
+        super(ApplyHistoryBestOrSample, self).__init__(
+            records, n_lines=None, include_compatible=True
+        )
+
+    def query(self, target, workload_key, has_complex_op, dag, func_name):
+        if has_complex_op or self.sample_simple_workloads:
+            ret = self._query_inside(target, workload_key, func_name)
+        else:
+            ret = super(ApplyHistoryBestOrSample, self)._query_inside(
+                target, workload_key, func_name
+            )
+
+        if ret is None:
+            ret = self._old_ctx.query(target, workload_key, has_complex_op, dag, func_name)
+        return ret
+
+    def _query_inside(self, target, workload_key, func_name):
+        ret = super(ApplyHistoryBestOrSample, self)._query_inside(target, workload_key, func_name)
+        if ret is not None:
+            return ret
+
+        # Sampling valid schedules when no existing records can be used.
+ task = SearchTask(workload_key=workload_key, target=target) + measure_ctx = LocalRPCMeasureContext(min_repeat_ms=300) + + log_file = self.log_dir.relpath(f"{decode_workload_key(workload_key)[0]}.log") + + while ret is None: + tune_option = TuningOptions( + num_measure_trials=self.num_measure, + runner=measure_ctx.runner, + measure_callbacks=[RecordToFile(log_file)], + verbose=0, + ) + search_policy = SketchPolicy( + task, + self.cost_model, + params={ + "eps_greedy": 0.01, + "sample_init_min_population": 64, + "evolutionary_search_num_iters": 0, + }, + init_search_callbacks=[PreloadMeasuredStates(log_file)], + verbose=0, + ) + task.tune(tune_option, search_policy) + + # Load the sampled records and query again. + self.load(log_file) + ret = super(ApplyHistoryBestOrSample, self)._query_inside( + target, workload_key, func_name + ) + + del measure_ctx + return ret + + +class FallbackContext(DispatchContext): + """ + A fallback dispatch context. + This is used as the root context. + """ + + def __init__(self): + super(FallbackContext, self).__init__() + self.memory = {} + + # Verbose level: + # 0: Completely silent. + # 1: Warning the missing configs for querying complex tasks. + # 2: Warning the missing configs for querying all tasks. + self.verbose = 1 + + # a set to prevent print duplicated message + self.messages = set() + + def query(self, target, workload_key, has_complex_op, dag, func_name): + key = (str(target), workload_key) + if key in self.memory: + return self.memory[key] + + if self.verbose == 2 or (has_complex_op and self.verbose == 1): + msg = ( + f"-----------------------------------\n" + f"{func_name}\n" + f"Cannot find tuned schedules for target={target}, workload_key={workload_key}. " + f"A fallback TOPI schedule is used, " + f"which may bring great performance regression or even compilation failure. 
" + f"Compute DAG info:\n{dag}" + ) + if msg not in self.messages: + self.messages.add(msg) + logger.warning(msg) + + state = None + + # cache this config to avoid duplicated warning message + self.memory[key] = state + return state + + def _query_inside(self, target, workload_key, func_name): + _ = target = workload_key = func_name + raise RuntimeError("This function should never be called") + + def update(self, target, workload_key, state): + key = (str(target), workload_key) + self.memory[key] = state + + +DispatchContext.current = FallbackContext() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/feature.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/feature.py new file mode 100644 index 0000000000000000000000000000000000000000..ea62560a6f6e8f61961c67e38d11479d44326822 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/feature.py @@ -0,0 +1,329 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""" +Python API for Feature extraction. The extracted features vector are used by cost models. 
+ +We extract one feature vector per BufferStoreNode statement in a TIR Stmt, +so we call this feature as "per-store" feature. +The cost model also does prediction for each BufferStoreNode statement and aggregates +the predicted score of each BufferStoreNode as the score of a TIR Stmt. + +The feature specification is defined by `src/auto_scheduler/feature.cc::FeatureSet` +""" + +from typing import List, Tuple, Union, Optional, Dict +import struct + +import numpy as np + +from .loop_state import State, StateObject +from .measure import MeasureInput, MeasureResult +from . import _ffi_api +from ..tir import PrimFunc + +# The maximum number of extracted buffers for one statement +DEFAULT_MAX_N_BUFS = 5 + +# The length of the feature vector +DEFAULT_FEATURE_VEC_LEN = 164 + +# The size of int and float in bytes +SIZE_OF_INT32 = 4 +SIZE_OF_FLOAT32 = 4 + + +def unpack_feature(byte_arr: bytearray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Unpack the flatten feature (in byte array format) from c++ + + Parameters + ---------- + byte_arr: bytearray + The two-dimensional feature vector in serialized byte array format + + Returns + ------- + features: np.ndarray + Feature vectors + normalized_throughputs: np.ndarray + Normalized throughputs + task_ids: np.ndarray + Task ids + + Note + ---- + For faster data copy between c++ and python, the c++ part returns features in a single + flatten array using a packed format. The python part then unpacks the flatten array. + + The packed format for n records is: + { + int n; + int sizes[n+2]; // The sizes for the following arrays + + float features_0[size[0]]; // The features for record 0 + float features_1[size[1]]; // The features for record 1 + ... + float features_i[size[i]]; // The features for record i + ... 
// until i == n - 1 + + float throughputs[sizes[n]]; // The normalized throughputs for n records + int task_ids[size[n+1]]; // The task ids for n records + + } + To implement this format, we also store int as float, so we can store all numbers + into a single float array. + """ + vec_len = DEFAULT_FEATURE_VEC_LEN + + # unpack sizes + offset = 0 + n = struct.unpack_from("1i", byte_arr, offset=offset)[0] + offset += SIZE_OF_INT32 + + sizes = struct.unpack_from(f"{n + 2}i", byte_arr, offset=offset) + offset += SIZE_OF_INT32 * (n + 2) + + # unpack features + features = [] + for size in sizes[:-2]: + row = [] + + # Now, we need to unpack the feature for multiple statements. + # The format is: + # { + # int n_stage; // The number of stages + # float feature_vecs[n_stage][vec_len] // The feature vector for each stage + # } + # where vec_len can be calculated by `(size - 1) / n_stmts` + + if size == 0: + # failed during lowering + features.append(np.zeros((1, vec_len))) + else: + n_stmts = struct.unpack_from("f", byte_arr, offset=offset) + offset += SIZE_OF_FLOAT32 + + n_stmts = int(n_stmts[0] + 0.5) + tmp_vec_len = (size - 1) // n_stmts + assert ( + tmp_vec_len == vec_len + ), f"The length of feature vector is wrong. Expected {vec_len} but got {tmp_vec_len}." 
+ assert tmp_vec_len * n_stmts == size - 1 + for _ in range(n_stmts): + x = struct.unpack_from(f"{vec_len}f", byte_arr, offset=offset) + offset += vec_len * SIZE_OF_FLOAT32 + row.append(x) + + features.append(np.array(row)) + + # unpack normalized_throughputs + m = sizes[-2] + normalized_throughputs = struct.unpack_from(f"{m}f", byte_arr, offset=offset) + offset += m * SIZE_OF_FLOAT32 + + # unpack task_ids + m = sizes[-1] + task_ids = struct.unpack_from(f"{m}i", byte_arr, offset=offset) + offset += m * SIZE_OF_INT32 + + assert offset == len(byte_arr), f"{offset} vs {len(byte_arr)}" + return np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids) + + +def get_per_store_features_from_file( + filename: str, max_lines: int, max_n_bufs: Optional[int] = None +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Get per-store features from a log file + + Parameters + ---------- + filename: str + The input filename + max_lines: int + Only extract the first n lines of the file + max_n_bufs: Optional[int] + The maximum number of extracted buffers for one statement + + Returns + ------- + features: np.ndarray + Feature vectors + normalized_throughputs: np.ndarray + Normalized throughputs + task_ids: np.ndarray + Task ids + """ + byte_arr = _ffi_api.GetPerStoreFeaturesFromFile( + filename, max_lines, max_n_bufs or DEFAULT_MAX_N_BUFS + ) + return unpack_feature(byte_arr) + + +def get_per_store_features_from_measure_pairs( + inputs: List[MeasureInput], + results: List[MeasureResult], + skip_first_n_feature_extraction: int = 0, + max_n_bufs: Optional[int] = None, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Get per-store features from measurement input/result pairs + + Parameters + ---------- + inputs: List[MeasureInput] + The measure inputs + results: List[MeasureResult] + The measure results + skip_first_n_feature_extraction: int + Skip feature extraction for the first n states + max_n_bufs: int + The maximum number of extracted buffers 
for one statement + + Returns + ------- + features: np.ndarray + Feature vectors + normalized_throughputs: np.ndarray + Normalized throughputs + task_ids: np.ndarray + Task ids + """ + byte_arr = _ffi_api.GetPerStoreFeaturesFromMeasurePairs( + inputs, results, skip_first_n_feature_extraction, max_n_bufs or DEFAULT_MAX_N_BUFS + ) + return unpack_feature(byte_arr) + + +def get_per_store_features_from_states( + states: List[Union[State, StateObject]], task: "SearchTask", max_n_bufs: Optional[int] = None +) -> np.ndarray: + """Get per-store features from measurement input/result pairs + + Parameters + ---------- + states: List[Union[State, StateObject]] + The input states + task: SearchTask + The search task of the input states + max_n_bufs: Optional[int] + The maximum number of extracted buffers for one statement + + Returns + ------- + features: np.ndarray + Feature vectors + """ + if isinstance(states[0], State): + state_objects = [s.state_object for s in states] + elif isinstance(states[0], StateObject): + state_objects = states + byte_arr = _ffi_api.GetPerStoreFeaturesFromStates( + state_objects, task, max_n_bufs or DEFAULT_MAX_N_BUFS + ) + return unpack_feature(byte_arr)[0] + + +def get_per_store_feature_names(max_n_bufs: Optional[int] = None) -> List[str]: + """Get the name of every element in the feature vector. Use this for debug and inspection. + + Parameters + ---------- + max_n_bufs: int + The maximum number of extracted buffers for one statement + + Returns + ------- + names: List[str] + The names of elements in the flatten feature vector + """ + return _ffi_api.GetPerStoreFeatureNames(max_n_bufs or DEFAULT_MAX_N_BUFS) + + +def features_from_primfunc( + func: PrimFunc, + cache_line_bytes: int = 64, + max_n_bufs: Optional[int] = None, + log_scale: bool = False, +) -> Optional[np.ndarray]: + """Extract performance features from a PrimFunc. + + Parameters + ---------- + func: PrimFunc + PrimFunc from which features will be extracted. 
Each store operation to + a unique buffer in the function will result in one row of features in + the output. + + cache_line_bytes: int, optional + Size of a cache line in bytes. Defaults to 64 which is the size for + most x86 processors. + + max_n_bufs: int, optional + Maximum number of buffers in generated features. This determines the + length of the resulting feature vector. + + log_scale: bool + Should entries in the feature vector be scaled by log2(x + 1). Defaults + to False. Use True if using features with a cost model. + + Returns + ------- + Optional[np.ndarray] + Output features, one row per store into a unique buffer statement in `func`. + """ + return _ffi_api.FeaturesFromPrimFunc( + func, cache_line_bytes, max_n_bufs or DEFAULT_MAX_N_BUFS, log_scale + ).numpy() + + +def named_features_from_primfunc( + func: PrimFunc, + cache_line_bytes: int = 64, + max_n_bufs: Optional[int] = None, + log_scale: bool = False, +) -> Optional[Dict[str, np.ndarray]]: + """Extract performance features and associated names from a PrimFunc. + + Parameters + ---------- + func: PrimFunc + PrimFunc from which features will be extracted. Each store operation to + a unique buffer in the function will result in one row of features in + the output. + + cache_line_bytes: int, optional + Size of a cache line in bytes. Defaults to 64 which is the size for + most x86 processors. + + max_n_bufs: int, optional + Maximum number of buffers in generated features. This determines the + length of the resulting feature vector. + + log_scale: bool + Should entries in the feature vector be scaled by log2(x + 1). Defaults + to False. Use True if using features with a cost model. + + Returns + ------- + Optional[Dict[str, np.ndarray]] + Mapping from feature name to features. One element per store into a + unique buffer statement in `func`. 
+ """ + features = features_from_primfunc(func, cache_line_bytes, max_n_bufs, log_scale) + names = get_per_store_feature_names(max_n_bufs) + if features.shape[0] == 0: + return None + return {name: features[:, i] for i, name in enumerate(names)} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/loop_state.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/loop_state.py new file mode 100644 index 0000000000000000000000000000000000000000..03cc00def6b74582860583498814da5c9ecf37ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/loop_state.py @@ -0,0 +1,618 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-import + +""" +The definition of the "state" in the search. +Each LoopState corresponds to a schedule for its ComputeDAG. +A LoopState consists of: 1. a current loop structure; 2. a list of transformation steps used to +construct the loop structure. +The loop structure keeps a preview of how the schedule will finally look like after lowering the +current state (e.g. number of iterators, the extent of each iterator, the compute_at locations +...). 
+During the schedule search process, the loop structure can provide search policy with necessary +information on how to manipulate the current state. +The transform history is a sequence of `TransformStep` which will finally be mapped to TVM +schedule primitives. The steps are also used for the serialization of a state. +The LoopState can be seen as a lightweight loop structure IR specifically for schedule search. +We don't use the existing TVM IR but to extend a new structure on it is because: +1. We want fast incremental change to the loop structures. The search policy needs to get the +immediate loop structures update rather than after TVM lowering; +2. We want serializable transform history for replay, backtracking, and mutation; +3. We may create some macro schedule primitives that represent the combination of several +TVM schedule primitives. +When the search is finished, we will lower the state to TVM IR with TVM's schedule primitives. +Since we share a lot of common objects during search, the transformation is implemented in +copy on write style. All objects are immutable, which is similar to TVM IR. +""" + +import tvm._ffi +from tvm.te.tensor import Operation, Tensor +from tvm.runtime import Object +from . import _ffi_api + + +@tvm._ffi.register_object("auto_scheduler.Iterator") +class Iterator(Object): + """A loop iterator structure.""" + + +@tvm._ffi.register_object("auto_scheduler.Stage") +class Stage(Object): + """A stage in the compute declaration. Similar to tvm.te.schedule.Stage.""" + + # Static trans table for compute_at location + # This is used to transform the compute_at location to C++ enum + COMPUTE_AT_TRANS_TABLE = {"root": 0, "inlined": 1, "iter": 2} + + +@tvm._ffi.register_object("auto_scheduler.State") +class StateObject(Object): + """The internal State object""" + + def __eq__(self, other): + return _ffi_api.StateEqual(self, other) + + +class State: + """ + A state in the search process. 
It consists of the current loop structure + and a list of transformation steps used to construct it. + Each State corresponds to a specific schedule for its ComputeDAG. + Parameters + ---------- + state_object : StateObject + The StateObject corresponding to C++ internal State object. + dag : ComputeDAG + The original ComputeDAG of this State. + Notes + ----- + This is a wrapper class of StateObject to deal with copy-on-write property + """ + + # Static trans table for thread bind and annotation + # This is used to transform the annotation name to C++ enum + ANNOTATION_TRANS_TABLE = { + "none": 0, + "unroll": 1, + "vectorize": 2, + "parallel": 3, + "vthread": 4, + "blockIdx.x": 5, + "threadIdx.x": 6, + "blockIdx.y": 7, + "threadIdx.y": 8, + "blockIdx.z": 9, + "threadIdx.z": 10, + "tensorize": 11, + } + + def __init__(self, state_object, dag): + self.state_object = state_object + self.compute_dag = dag + + self.stage_id_map = {} # A dict maps operation to stage id + self._update_stage_id_map() + + @property + def stages(self): + """ + Returns + ------- + stages : List[Stage] + """ + return self.state_object.stages + + @property + def transform_steps(self): + """ + Returns + ------- + transform_steps : List[transform_steps] + """ + return self.state_object.transform_steps + + @property + def stage_ops(self): + """ + Returns + ------- + ops: List[Operation] + """ + return [stage.op for stage in self.stages] + + def bind(self, stage, iterator, thread_name): + """Schedule primitive corresponding to `te.Stage.bind`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be binded, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to be binded. + thread_name : str + The thread type to be binded. 
Candidates: + - vthread + - blockIdx.x + - threadIdx.x + - blockIdx.y + - threadIdx.y + - blockIdx.z + - threadIdx.z + Returns + ------- + res_it : Iterator + The binded Iterator. + """ + if not thread_name in State.ANNOTATION_TRANS_TABLE.keys(): + raise ValueError("Invalid thread_name: ", thread_name) + + self.state_object, res = _ffi_api.StateBind( + self.state_object, + self._resolve_stage_id(stage), + iterator, + State.ANNOTATION_TRANS_TABLE[thread_name], + ) + return res + + def parallel(self, stage, iterator): + """Schedule primitive corresponding to `te.Stage.parallel`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be paralleled, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to be paralleled. + Returns + ------- + res_it : Iterator + The paralleled Iterator. + """ + self.state_object, res = _ffi_api.StateParallel( + self.state_object, self._resolve_stage_id(stage), iterator + ) + return res + + def unroll(self, stage, iterator, max_unroll=None): + """Schedule primitive corresponding to `te.Stage.unroll`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be unrolled, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to be unrolled. + max_unroll : Optional[int] + The max unroll limit. Iterator with extent larger than this limit will be skipped. + Returns + ------- + res_it : Iterator + The unrolled Iterator. + """ + self.state_object, res = _ffi_api.StateUnroll( + self.state_object, + self._resolve_stage_id(stage), + iterator, + max_unroll if max_unroll else -1, + ) + return res + + def vectorize(self, stage, iterator): + """Schedule primitive corresponding to `te.Stage.vectorize`. + See also the `te.Stage` for more details. 
+ Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be vectorized, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to be vectorized. + Returns + ------- + res_it : Iterator + The vectorized Iterator. + """ + self.state_object, res = _ffi_api.StateVectorize( + self.state_object, self._resolve_stage_id(stage), iterator + ) + return res + + def fuse(self, stage, iters): + """Schedule primitive corresponding to `te.Stage.fuse`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be fused, which can be specified by the integer index, Operation, + or output tensor of the stage. + iters : List[Iterator] + The iterators to be fused. + Returns + ------- + res_it : Iterator + The fused Iterator. + Notes + ----- + If the iterators to be fused have stages attached at them(by compute_at), the fused + result will become the new attach point. + """ + self.state_object, res = _ffi_api.StateFuse( + self.state_object, self._resolve_stage_id(stage), iters + ) + return res + + def pragma(self, stage, iterator, pragma_type): + """Schedule primitive corresponding to `te.Stage.pragma`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to add pragma, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to add pragma. + pragma_type : str + The pragma string. + """ + self.state_object = _ffi_api.StatePragma( + self.state_object, self._resolve_stage_id(stage), iterator, pragma_type + ) + + def reorder(self, stage, order): + """Schedule primitive corresponding to `te.Stage.reorder`. + See also the `te.Stage` for more details. 
+ Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be reordered, which can be specified by the integer index, Operation, + or output tensor of the stage. + order : List[Iterator] + Iterators in the expected order. + """ + self.state_object = _ffi_api.StateReorder( + self.state_object, self._resolve_stage_id(stage), order + ) + + def split(self, stage, iterator, lengths, inner_to_outer=True): + """Schedule primitive corresponding to `te.Stage.split`. + See also the `te.Stage` for more details. + This API supports multiple split factors. (e.g. with 2 split factors, the original iterator + will be split to 3 parts, use `inner_to_outer` to control the split order) + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be split, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to be split. + lengths: List[int] + The multiple split factors. Can be None to be filled by search policy. + inner_to_outer: boolean = True + Whether the factor go from inner to outer, or from outer to inner. + Returns + ------- + res_its : List[Iterator] + The splitted new Iterators. + Notes + ----- + If we do split on an iterator which has stages attached at it(by compute_at), the inner + most iterator of split results will become the new attach point. + """ + self.state_object, res = _ffi_api.StateSplit( + self.state_object, self._resolve_stage_id(stage), iterator, lengths, inner_to_outer + ) + return res + + def follow_split(self, stage, iterator, src_step_id, n_split): + """The schedule primitive similar to split, but uses split factors from previous steps. + This step splits the iterator by the same factors as the given SplitStep. + Notes + ------ + This step is useful in a scenario that we have subgraph Dense -> Relu, + and we want to compute the Dense stage at ReLU. In this case, we need them to have + the same tiling structure of common outer loops. 
+ The follow_split step could be used here to split the Dense stage and makes sure its + splitting factors are the same as the given split step for the ReLU stage. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be split, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to split. + src_step_id : int + The index of the split step to be followed in the history. + n_split : int + The number of split level. + Returns + ------- + res_its : List[Iterator] + The splitted new Iterators. + """ + + self.state_object, res = _ffi_api.StateFollowSplit( + self.state_object, self._resolve_stage_id(stage), iterator, src_step_id, n_split + ) + return res + + def follow_fused_split(self, stage, iterator, src_step_ids, level, factor_or_nparts): + """Schedule primitive extends to split step. + This step is used to split an iterator by the same factors + as the given list of SplitSteps and FuseSteps. + Notes + ------ + This step is useful in a scenario that we have a subgraph + in GPU schedule: Input -> Dense + for i.0@j.0 = ... : Bind to blockIdx.x + for i.1@j.1 = ... : Bind to threadIdx.x + for i.2@j.2 = ... + Input_shared = Input ... + for k = ... + Dense = ... + We intend to apply cooperative fetching with the input stage, while the threadIdx.x + axis is bound to an iterator generated by split & fuse step. + The follow_fused_step is used split the iterator to 2 parts, while the split factor + matches the final extent of the threadIdx.x bound iterator. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be split, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The iterator to split. + src_step_ids : List[int] + The indices of the split steps to be followed in the history. + level : int + Use the length in this split level. 
+ factor_or_nparts : bool + True to use `factor` for split from inner to outer, + False to use `nparts` for split from outer to inner. + Returns + ------- + res_its : List[Iterator] + The splitted new Iterators. + """ + + self.state_object, res = _ffi_api.StateFollowFusedSplit( + self.state_object, + self._resolve_stage_id(stage), + iterator, + src_step_ids, + level, + factor_or_nparts, + ) + return res + + def storage_align(self, stage, iterator, factor, offset): + """Schedule primitive corresponding to `te.Stage.storage_align`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be storage aligned, which can be specified by the integer index, + Operation, or output tensor of the stage. + iterator : Iterator + The iterator to be aligned. + factor : int + The factor in alignment specification. + offset : int + The offset in the alignment specification. + """ + self.state_object = _ffi_api.StateStorageAlign( + self.state_object, self._resolve_stage_id(stage), iterator, factor, offset + ) + + def compute_at(self, stage, target_stage, target_iter): + """Schedule primitive corresponding to `te.Stage.compute_at`. + See also the `te.Stage` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The source Stage of computed at, which can be specified by the integer index, + Operation, or output tensor of the stage. + target_stage : Union[int, Operation, Tensor] + The target stage of compute_at, which can be specified by the integer index, Operation, + or output tensor of the stage. + target_iter : Iterator + The target Iterator of compute_at. + Notes + ----- + After compute_at, we need careful dependency analysis to compute the accurate bound + information. However, it is relatively expensive and complicated, so we just fill "None" + as bound for the newly created iterators. + Call ComputeDAG::InferBound on the returned state to get the complete bound information. 
+        """
+        self.state_object = _ffi_api.StateComputeAt(
+            self.state_object,
+            self._resolve_stage_id(stage),
+            self._resolve_stage_id(target_stage),
+            target_iter,
+        )
+
+    def compute_inline(self, stage):
+        """Schedule primitive corresponding to `te.Stage.compute_inline`, see also the `te.Stage`
+        for more details.
+        Parameters
+        ----------
+        stage : Union[int, Operation, Tensor]
+            The Stage to be marked compute inlined, which can be specified by the integer index,
+            Operation, or output tensor of the stage.
+        """
+        self.state_object = _ffi_api.StateComputeInline(
+            self.state_object, self._resolve_stage_id(stage)
+        )
+
+    def compute_root(self, stage):
+        """Schedule primitive corresponding to `te.Stage.compute_root`.
+        See also the `te.Stage` for more details.
+        Parameters
+        ----------
+        stage : Union[int, Operation, Tensor]
+            The Stage to be marked compute at root, which can be specified by the integer index,
+            Operation, or output tensor of the stage.
+        Notes
+        -----
+        After compute_root, we need careful dependency analysis to compute the accurate bound
+        information. However, it is relatively expensive and complicated, so we just fill "None"
+        as bound for the newly created iterators.
+        Call ComputeDAG::InferBound on the returned state to get the complete bound information.
+        """
+        self.state_object = _ffi_api.StateComputeRoot(
+            self.state_object, self._resolve_stage_id(stage)
+        )
+
+    def cache_read(self, stage, scope_name, reader_stages):
+        """Schedule primitive corresponding to `te.Schedule.cache_read`.
+        See also the `te.Schedule` for more details.
+        Parameters
+        ----------
+        stage : Union[int, Operation, Tensor]
+            The Stage to be cache_read, which can be specified by the integer index, Operation,
+            or output tensor of the stage.
+        scope_name : str
+            The scope name of the newly added read stage.
+        reader_stages : List[Union[int, Operation, Tensor]]
+            The reader stages. Each of the list can be specified by the integer index, Operation,
+            or output tensor of the stage.
+        Returns
+        -------
+        new_stage_op : Operator
+            The Operator of the new added stage.
+        Notes
+        -----
+        Cache read step will insert an extra stage to the original ComputeDAG (at the back of the
+        target stage).
+        """
+        reader_stage_ids = [self._resolve_stage_id(i) for i in reader_stages]
+        self.state_object, new_stage_id = _ffi_api.StateCacheRead(
+            self.state_object,
+            self._resolve_stage_id(stage),
+            scope_name,
+            reader_stage_ids,
+            self.compute_dag,
+        )
+        # Add a new stage will change all ops behind the added stage. But we still want to keep the
+        # original ops map, apply stage id offset to stage_id_map to make them work.
+        self._apply_stage_id_offset(int(new_stage_id))
+        self._update_stage_id_map()
+        return self.stages[int(new_stage_id)].op
+
+    def cache_write(self, stage, scope_name):
+        """Schedule primitive corresponding to `te.Schedule.cache_write`.
+        See also the `te.Schedule` for more details.
+        Parameters
+        ----------
+        stage : Union[int, Operation, Tensor]
+            The Stage to be cache_write, which can be specified by the integer index, Operation,
+            or output tensor of the stage.
+        scope_name : str
+            The scope name of the newly added compute stage.
+        Returns
+        -------
+        new_stage_op : Operator
+            The Operator of the new added stage.
+        Notes
+        -----
+        Cache write step will insert an extra stage to the original ComputeDAG (in the front of the
+        target stage).
+        This step will cache write all output tensors of the target stage.
+        """
+        self.state_object, new_stage_id = _ffi_api.StateCacheWrite(
+            self.state_object, self._resolve_stage_id(stage), scope_name, self.compute_dag
+        )
+        # Add a new stage will change all ops behind the added stage. But we still want to keep the
+        # original ops map, apply stage id offset to stage_id_map to make them work.
+ self._apply_stage_id_offset(int(new_stage_id)) + self._update_stage_id_map() + return self.stages[int(new_stage_id)].op + + def rfactor(self, stage, iterator, factor_iter_id): + """Schedule primitive corresponding to `te.Schedule.rfactor`. + See also the `te.Schedule` for more details. + Parameters + ---------- + stage : Union[int, Operation, Tensor] + The Stage to be factored, which can be specified by the integer index, Operation, + or output tensor of the stage. + iterator : Iterator + The reduction iterator to be factored. + factor_iter_id : int + The position where the new iterator is placed. + Returns + ------- + new_stage_op : Operator + The Operator of the new added stage. + Notes + ----- + Rfactor step will insert an extra stage to the original ComputeDAG (in the front of the + target stage). + """ + self.state_object, new_stage_id = _ffi_api.StateRfactor( + self.state_object, + self._resolve_stage_id(stage), + iterator, + factor_iter_id, + self.compute_dag, + ) + # Add a new stage will change all ops behind the added stage. But we still want to keep the + # original ops map, apply stage id offset to stage_id_map to make them work. + self._apply_stage_id_offset(int(new_stage_id)) + self._update_stage_id_map() + return self.stages[int(new_stage_id)].op + + def copy(self): + """Do deep copy of this State.""" + state = State(self.state_object, self.compute_dag) + state.stage_id_map = self.stage_id_map.copy() + return state + + def _resolve_stage_id(self, stage_id): + if isinstance(stage_id, Operation): + return self.stage_id_map[stage_id] + if isinstance(stage_id, Tensor): + return self.stage_id_map[stage_id.op] + if isinstance(stage_id, int): + return stage_id + raise ValueError( + "Invalid stage: " + stage_id + " . 
Expect to be a int, Operation or Tensor" + ) + + def _update_stage_id_map(self): + for index, stage in enumerate(self.stages): + self.stage_id_map[stage.op] = index + + def _apply_stage_id_offset(self, start_id, offset=1): + for key, value in self.stage_id_map.items(): + if value >= start_id: + self.stage_id_map[key] = value + offset + + def __getitem__(self, key): + if isinstance(key, Tensor): + key = key.op + if isinstance(key, Operation): + return self.stages[self.stage_id_map[key]] + raise ValueError("Invalid item: " + key + " . Expect to be a Operation or Tensor") + + def __str__(self): + return str(self.state_object) + + def __eq__(self, other): + return _ffi_api.StateEqual(self.state_object, other.state_object) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/measure.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/measure.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5f06c38f6d5e9b3668b98271c12e99850f0f48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/measure.py @@ -0,0 +1,1334 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +""" +Distributed measurement infrastructure to measure the runtime costs of tensor programs. + +These functions are responsible for building the tvm module, uploading it to +remote devices, recording the running time costs, and checking the correctness of the output. + +We separate the measurement into two steps: build and run. +A builder builds the executable binary files and a runner runs the binary files to +get the measurement results. The flow of data structures is + + . `ProgramBuilder` `ProgramRunner` + `MeasureInput` -----------------> `BuildResult` ----------------> `MeasureResult` + +We implement these in python to utilize python's multiprocessing and error handling. +""" + +import logging +import multiprocessing +import os +import shutil +import tempfile +import time + +import tvm._ffi +from tvm.autotvm.env import AutotvmGlobalScope, reset_global_scope +from tvm.contrib import ndk, tar +from tvm.contrib.popen_pool import PopenPoolExecutor, PopenWorker, StatusKind +from tvm.driver import build_module +from tvm.ir import transform +from tvm.runtime import Object, module, ndarray +from tvm.target import Target + +from . import _ffi_api +from .loop_state import StateObject +from .utils import ( + call_func_with_timeout, + check_remote, + get_const_tuple, + get_func_name, + make_traceback_info, + request_remote, +) +from .workload_registry import ( + deserialize_workload_registry_entry, + serialize_workload_registry_entry, +) + +# pylint: disable=invalid-name +logger = logging.getLogger("auto_scheduler") + +# The time cost for measurements with errors +# We use 1e10 instead of sys.float_info.max for better readability in log +MAX_FLOAT = 1e10 + + +class BuildFunc: + """store build_func name and callable to class variable. + name: str = "default" + The name of registered build function. + build_func: callable = tar.tar + The callable of registered build function. 
+ """ + + name = "default" + build_func = tar.tar + + +@tvm._ffi.register_object("auto_scheduler.MeasureCallback") +class MeasureCallback(Object): + """The base class of measurement callback functions.""" + + +@tvm._ffi.register_object("auto_scheduler.PythonBasedMeasureCallback") +class PythonBasedMeasureCallback(MeasureCallback): + """Base class for measure callbacks implemented in python""" + + def __init__(self): + def callback_func(policy, inputs, results): + self.callback(policy, inputs, results) + + self.__init_handle_by_constructor__(_ffi_api.PythonBasedMeasureCallback, callback_func) + + def callback(self, policy, inputs, results): + """The callback function. + + Parameters + ---------- + policy: auto_scheduler.search_policy.SearchPolicy + The search policy. + inputs : List[auto_scheduler.measure.MeasureInput] + The measurement inputs + results : List[auto_scheduler.measure.MeasureResult] + The measurement results + """ + raise NotImplementedError + + +@tvm._ffi.register_object("auto_scheduler.MeasureInput") +class MeasureInput(Object): + """Store the input of a measurement. + + Parameters + ---------- + task : SearchTask + The SearchTask of this measurement. + state : Union[State, StateObject] + The State to be measured. + """ + + def __init__(self, task, state): + state = state if isinstance(state, StateObject) else state.state_object + self.__init_handle_by_constructor__(_ffi_api.MeasureInput, task, state) + + def serialize(self): + """Custom serialization to workaround MeasureInput not exposing all its + members to the TVM ffi interface. + + Note that we do not implement __getstate__ as it does not seem to work + with initialization of the workload registry (maybe because of + initialization order?). 
+ """ + return [ + _ffi_api.SerializeMeasureInput(self), + serialize_workload_registry_entry(self.task.workload_key), + ] + + @staticmethod + def deserialize(data): + inp = _ffi_api.DeserializeMeasureInput(data[0]) + deserialize_workload_registry_entry(data[1]) + return recover_measure_input(inp) + + +@tvm._ffi.register_object("auto_scheduler.BuildResult") +class BuildResult(Object): + """Store the result of a build. + + Parameters + ---------- + filename : Optional[str] + The filename of built binary file. + args : List[Tensor] + The arguments. + error_no : int + The error code. + error_msg : Optional[str] + The error message if there is any error. + time_cost : float + The time cost of build. + """ + + def __init__(self, filename, args, error_no, error_msg, time_cost): + filename = filename if filename else "" + error_msg = error_msg if error_msg else "" + + self.__init_handle_by_constructor__( + _ffi_api.BuildResult, filename, args, error_no, error_msg, time_cost + ) + + +@tvm._ffi.register_object("auto_scheduler.MeasureResult") +class MeasureResult(Object): + """Store the results of a measurement. + + Parameters + ---------- + costs : List[float] + The time costs of execution. + error_no : int + The error code. + error_msg : Optional[str] + The error message if there is any error. + all_cost : float + The time cost of build and run. + timestamp : float + The time stamps of this measurement. + """ + + def __init__(self, costs, error_no, error_msg, all_cost, timestamp): + error_msg = error_msg if error_msg else "" + + self.__init_handle_by_constructor__( + _ffi_api.MeasureResult, costs, error_no, error_msg, all_cost, timestamp + ) + + +def recover_measure_input(inp, rebuild_state=False): + """ + Recover a deserialized MeasureInput by rebuilding the missing fields. + 1. Rebuid the compute_dag in inp.task + 2. 
(Optional) Rebuild the stages in inp.state + + Parameters + ---------- + inp: MeasureInput + The deserialized MeasureInput + rebuild_state: bool = False + Whether rebuild the stages in MeasureInput.State + + Returns + ------- + new_input: MeasureInput + The fully recovered MeasureInput with all fields rebuilt. + """ + # pylint: disable=import-outside-toplevel + from .search_task import SearchTask # lazily import to avoid recursive dependency + + task = inp.task + task.target, task.target_host = Target.canon_target_and_host(task.target, task.target_host) + new_task = SearchTask( + workload_key=task.workload_key, + target=task.target, + hardware_params=task.hardware_params, + layout_rewrite_option=task.layout_rewrite_option, + task_inputs=list(task.task_input_names), + ) + + if rebuild_state: + new_state = new_task.compute_dag.infer_bound_from_state(inp.state) + else: + new_state = inp.state + + return MeasureInput(new_task, new_state) + + +@tvm._ffi.register_object("auto_scheduler.ProgramBuilder") +class ProgramBuilder(Object): + """The base class of ProgramBuilders.""" + + def build(self, measure_inputs, verbose=1): + """Build programs and return results. + + Parameters + ---------- + measure_inputs : List[MeasureInput] + A List of MeasureInput. + verbose: int = 1 + Verbosity level. 0 for silent, 1 to output information during program building. + + Returns + ------- + res : List[BuildResult] + """ + return _ffi_api.ProgramBuilderBuild(self, measure_inputs, verbose) + + +@tvm._ffi.register_object("auto_scheduler.ProgramRunner") +class ProgramRunner(Object): + """The base class of ProgramRunners.""" + + def run(self, measure_inputs, build_results, verbose=1): + """Run measurement and return results. + + Parameters + ---------- + measure_inputs : List[MeasureInput] + A List of MeasureInput. + build_results : List[BuildResult] + A List of BuildResult to be ran. + verbose: int = 1 + Verbosity level. 0 for silent, 1 to output information during program running. 
+ + Returns + ------- + res : List[MeasureResult] + """ + return _ffi_api.ProgramRunnerRun(self, measure_inputs, build_results, verbose) + + +@tvm._ffi.register_object("auto_scheduler.ProgramMeasurer") +class ProgramMeasurer(Object): + """ + Measurer that measures the time costs of tvm programs + This class combines ProgramBuilder and ProgramRunner, and provides a simpler API. + + Parameters + ---------- + builder : ProgramBuilder + The ProgramBuilder to build programs + runner : ProgramRunner + The ProgramRunner to measure programs. + callbacks : List[MeasureCallback] + Callbacks to be called after each measurement batch + verbose : int + The Verbosity level: 0 for silent, 1 to output information during program + max_continuous_error : Optional[int] + The number of allowed maximum continuous error before stop the tuning + """ + + def __init__(self, builder, runner, callbacks, verbose, max_continuous_error=None): + max_continuous_error = max_continuous_error or -1 # -1 means using the default value + self.__init_handle_by_constructor__( + _ffi_api.ProgramMeasurer, builder, runner, callbacks, verbose, max_continuous_error + ) + + +@tvm._ffi.register_object("auto_scheduler.LocalBuilder") +class LocalBuilder(ProgramBuilder): + """LocalBuilder use local CPU cores to build programs in parallel. + + Parameters + ---------- + timeout : int = 15 + The timeout limit (in second) for each build thread. + This is used in a wrapper of the multiprocessing.Process.join(). + n_parallel : int = multiprocessing.cpu_count() + Number of threads used to build in parallel. + build_func: callable or str = "default" + If is 'default', use default build function + If is 'ndk', use function for android ndk + If is callable, use it as custom build function, expect lib_format field. 
+ """ + + def __init__(self, timeout=15, n_parallel=multiprocessing.cpu_count(), build_func="default"): + if build_func == "default": + BuildFunc.name = "default" + BuildFunc.build_func = tar.tar + elif build_func == "ndk": + BuildFunc.name = "ndk" + BuildFunc.build_func = ndk.create_shared + elif callable(build_func): + BuildFunc.name = "custom" + BuildFunc.build_func = build_func + else: + raise ValueError("Invalid build_func" + build_func) + + self.__init_handle_by_constructor__( + _ffi_api.LocalBuilder, timeout, n_parallel, BuildFunc.name + ) + + +@tvm._ffi.register_object("auto_scheduler.LocalRunner") +class LocalRunner(ProgramRunner): + """LocalRunner that uses local CPU/GPU to measures the time cost of programs. + + Parameters + ---------- + timeout : int = 10 + The timeout limit (in second) for each run. + This is used in a wrapper of the multiprocessing.Process.join(). + number : int = 3 + The number of times to run the generated code for taking average. + We call these runs as one `repeat` of measurement. + repeat : int = 1 + The number of times to repeat the measurement. + In total, the generated code will be run (1 + number x repeat) times, + where the first "1" is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. + min_repeat_ms : int = 100 + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + cooldown_interval : float = 0.0 + The cool down interval between two measurements in seconds. + enable_cpu_cache_flush: bool = False + Whether to flush cache on CPU between repeated measurements. 
+ Flushing cache can make the measured latency of one operator closer to + its actual latency during end-to-end inference. + To make this option effective, the argument `number` should also be set to 1. + This is only has effect on CPU task. + device: int = 0 + Which device to run on if multiple are available. + """ + + def __init__( + self, + timeout=10, + number=3, + repeat=1, + min_repeat_ms=100, + cooldown_interval=0.0, + enable_cpu_cache_flush=False, + device=0, + ): + if enable_cpu_cache_flush: + number = 1 + min_repeat_ms = 0 + + self.__init_handle_by_constructor__( + _ffi_api.LocalRunner, + timeout, + number, + repeat, + min_repeat_ms, + cooldown_interval, + enable_cpu_cache_flush, + device, + ) + + +@tvm._ffi.register_object("auto_scheduler.RPCRunner") +class RPCRunner(ProgramRunner): + """RPCRunner that uses RPC call to measures the time cost of programs on remote devices. + Or sometime we may need to use RPC even in local running to insulate the thread environment. + (e.g. running CUDA programs) + + Parameters + ---------- + key : str + The key of the device registered in the RPC tracker. + host : str + The host address of the RPC Tracker. + port : int + The port of RPC Tracker. + priority : int = 1 + The priority of this run request, larger is more prior. + n_parallel : int = 1 + The number of tasks run in parallel. + timeout : int = 10 + The timeout limit (in second) for each run. + This is used in a wrapper of the multiprocessing.Process.join(). + number : int = 3 + The number of times to run the generated code for taking average. + We call these runs as one `repeat` of measurement. + repeat : int = 1 + The number of times to repeat the measurement. + In total, the generated code will be run (1 + number x repeat) times, + where the first "1" is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. + min_repeat_ms : int = 100 + The minimum duration of one `repeat` in milliseconds. 
+ By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + cooldown_interval : float = 0.0 + The cool down interval between two measurements in seconds. + enable_cpu_cache_flush: bool = False + Whether to flush cache on CPU between repeated measurements. + Flushing cache can make the measured latency of one operator closer to + its actual latency during end-to-end inference. + To make this option effective, the argument `number` should also be set to 1. + This is only has effect on CPU task. + device: int = 0 + Which device to run on if multiple are available. + """ + + def __init__( + self, + key, + host, + port, + priority=1, + n_parallel=1, + timeout=10, + number=3, + repeat=1, + min_repeat_ms=100, + cooldown_interval=0.0, + enable_cpu_cache_flush=False, + device=0, + ): + self.__init_handle_by_constructor__( + _ffi_api.RPCRunner, + key, + host, + port, + priority, + n_parallel, + timeout, + number, + repeat, + min_repeat_ms, + cooldown_interval, + enable_cpu_cache_flush, + device, + ) + + if check_remote(key, host, port, priority, timeout): + print("Get devices for measurement successfully!") + else: + raise RuntimeError( + "Cannot get remote devices from the tracker. " + "Please check the status of tracker by " + "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' " + "and make sure you have free devices on the queue status." + ) + + +class LocalRPCMeasureContext: + """A context wrapper for running RPCRunner locally. + This will launch a local RPC Tracker and local RPC Server. + + Parameters + ---------- + priority : int = 1 + The priority of this run request, larger is more prior. + n_parallel : int = 1 + The number of tasks run in parallel. 
+ timeout : int = 10 + The timeout limit (in second) for each run. + This is used in a wrapper of the multiprocessing.Process.join(). + number : int = 3 + The number of times to run the generated code for taking average. + We call these runs as one `repeat` of measurement. + repeat : int = 1 + The number of times to repeat the measurement. + In total, the generated code will be run (1 + number x repeat) times, + where the first "1" is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. + min_repeat_ms : int = 0 + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + cooldown_interval : float = 0.0 + The cool down interval between two measurements in seconds. + enable_cpu_cache_flush: bool = False + Whether to flush cache on CPU between repeated measurements. + Flushing cache can make the measured latency of one operator closer to + its actual latency during end-to-end inference. + To make this option effective, the argument `number` should also be set to 1. + This is only has effect on CPU task. + device: int = 0 + Which device to run on if multiple are available. 
+ """ + + def __init__( + self, + priority=1, + n_parallel=1, + timeout=10, + number=3, + repeat=1, + min_repeat_ms=0, + cooldown_interval=0.0, + enable_cpu_cache_flush=False, + device=0, + ): + # pylint: disable=import-outside-toplevel + from tvm.rpc.server import Server + from tvm.rpc.tracker import Tracker + + self.tracker = Tracker(port=9000, port_end=10000, silent=True) + device_key = f"$local$device${self.tracker.port}" + self.server = Server( + port=self.tracker.port, + port_end=10000, + key=device_key, + silent=True, + tracker_addr=("127.0.0.1", self.tracker.port), + ) + self.runner = RPCRunner( + device_key, + "127.0.0.1", + self.tracker.port, + priority, + n_parallel, + timeout, + number, + repeat, + min_repeat_ms, + cooldown_interval, + enable_cpu_cache_flush, + device, + ) + # Wait for the processes to start + time.sleep(0.5) + + def __del__(self): + # Close the tracker and server before exit + self.tracker.terminate() + self.server.terminate() + time.sleep(0.5) + + +class MeasureErrorNo(object): + """Error type for MeasureResult.""" + + NO_ERROR = 0 # No error + INSTANTIATION_ERROR = 1 # Errors happen when apply transform steps from init state + COMPILE_HOST = 2 # Errors happen when compiling code on host (e.g., tvm.build) + COMPILE_DEVICE = 3 # Errors happen when compiling code on device + # (e.g. 
OpenCL JIT on the device) + RUNTIME_DEVICE = 4 # Errors happen when run program on device + WRONG_ANSWER = 5 # Answer is wrong when compared to a reference output + BUILD_TIMEOUT = 6 # Timeout during compilation + RUN_TIMEOUT = 7 # Timeout during run + UNKNOWN_ERROR = 8 # Unknown error + + +def _local_build_worker(inp_serialized, build_func, verbose): + tic = time.time() + inp = MeasureInput.deserialize(inp_serialized) + task = inp.task + task.target, task.target_host = Target.canon_target_and_host(task.target, task.target_host) + + error_no = MeasureErrorNo.NO_ERROR + error_msg = None + args = [] + + try: + sch, args = task.compute_dag.apply_steps_from_state( + inp.state, layout_rewrite=task.layout_rewrite_option + ) + # pylint: disable=broad-except + except Exception: + error_no = MeasureErrorNo.INSTANTIATION_ERROR + error_msg = make_traceback_info() + + if error_no == 0: + dirname = tempfile.mkdtemp() + filename = os.path.join(dirname, "tmp_func." + build_func.output_format) + + try: + with transform.PassContext().current(): + func = build_module.build(sch, args, target=task.target) + func.export_library(filename, fcompile=build_func) + # pylint: disable=broad-except + except Exception: + error_no = MeasureErrorNo.COMPILE_HOST + error_msg = make_traceback_info() + else: + filename = "" + + if verbose >= 1: + if error_no == MeasureErrorNo.NO_ERROR: + print(".", end="", flush=True) + else: + print(".E", end="", flush=True) # Build error + + return filename, args, error_no, error_msg, time.time() - tic + + +def local_build_worker(args): + """ + Build function of LocalBuilder to be ran in the Builder thread pool. + + Parameters + ---------- + args: Tuple[MeasureInput, callable, int] + inputs, build-func, verbose args passed to local_builder_build + + Returns + ------- + res : BuildResult + The build result of this Builder thread. 
+ """ + inp, build_func, verbose = args + + return _local_build_worker(inp, build_func, verbose) + + +@tvm._ffi.register_func("auto_scheduler.local_builder.build") +def local_builder_build(inputs, timeout, n_parallel, build_func="default", verbose=1): + """ + Build function of LocalBuilder to build the MeasureInputs to runnable modules. + + Parameters + ---------- + inputs : List[MeasureInput] + The MeasureInputs to be built. + timeout : int + The timeout limit (in second) for each build thread. + This is used in a wrapper of the multiprocessing.Process.join(). + n_parallel : int + Number of threads used to build in parallel. + build_func : str = 'default' + The name of build function to process the built module. + verbose: int = 1 + Verbosity level. 0 for silent, 1 to output information during program building. + + Returns + ------- + res : List[BuildResult] + The build results of these MeasureInputs. + """ + assert build_func == BuildFunc.name, ( + "BuildFunc.name: " + BuildFunc.name + ", but args is: " + build_func + ) + executor = PopenPoolExecutor( + n_parallel, timeout, reset_global_scope, (AutotvmGlobalScope.current,) + ) + tuple_res = executor.map_with_error_catching( + local_build_worker, [(i.serialize(), BuildFunc.build_func, verbose) for i in inputs] + ) + + results = [] + for res in tuple_res: + if res.status == StatusKind.COMPLETE: + results.append(BuildResult(*res.value)) + elif res.status == StatusKind.TIMEOUT: + if verbose >= 1: + print(".T", end="", flush=True) # Build timeout + results.append(BuildResult(None, [], MeasureErrorNo.BUILD_TIMEOUT, None, timeout)) + elif res.status == StatusKind.EXCEPTION: + if verbose >= 1: + print(".E", end="", flush=True) # Build error + results.append( + BuildResult(None, [], MeasureErrorNo.COMPILE_HOST, repr(res.value), timeout) + ) + else: + raise ValueError("Result status is not expected. 
Unreachable branch") + + return results + + +TASK_INPUT_CHECK_FUNC_REGISTRY = {} + + +def register_task_input_check_func(func_name, f=None, override=False): + """Register a function that checks the input buffer map. + + The input function should take a list of Tensor wich indicate the Input/output Tensor of a TVM + subgraph and return a Map from the input Tensor to its buffer name. + + Parameters + ---------- + func_name : Union[Function, str] + The check function that returns the compute declaration Tensors or its function name. + f : Optional[Function] + The check function to be registered. + override : boolean = False + Whether to override existing entry. + + Examples + -------- + .. code-block:: python + + @auto_scheduler.register_task_input_check_func + def check_task_input_by_placeholder_name(args : List[Tensor]): + tensor_input_map = {} + for arg in args: + if isinstance(arg.op, tvm.te.PlaceholderOp): + if arg.op.name != "placeholder": + tensor_input_map[arg] = arg.op.name + return tensor_input_map + """ + global TASK_INPUT_CHECK_FUNC_REGISTRY + + if callable(func_name): + f = func_name + func_name = get_func_name(f) + if not isinstance(func_name, str): + raise ValueError("expect string function name") + + def register(myf): + """internal register function""" + if func_name in TASK_INPUT_CHECK_FUNC_REGISTRY and not override: + raise RuntimeError(f"{func_name} has been registered already") + TASK_INPUT_CHECK_FUNC_REGISTRY[func_name] = myf + return myf + + if f: + return register(f) + return register + + +def prepare_input_map(args, workload_key=None): + """This function deals with special task inputs. Map the input Tensor of a TVM subgraph + to a specific buffer name in the global buffer map. + + Parameters + ---------- + args : List[Tensor] + Input/output Tensor of a TVM subgraph. + + workload_key: Optional[str] + The workload for which these inputs are being prepared. 
This + is used to identify if an input is being provided by (see + `register_task_input_buffer`). + + Returns + ------- + Dict[Tensor, str] : + Map from the input Tensor to its buffer name. + + Notes + ----- + The buffer name is specially designed, and these buffer should be provided in + `SearchTask(..., task_inputs={...})`. + """ + # pylint: disable=import-outside-toplevel + + global TASK_INPUT_CHECK_FUNC_REGISTRY + + from .search_task import TASK_INPUT_BUFFER_TABLE + + # A dict that maps the input tensor arg to a buffer name + tensor_input_map = {} + + # Case 0: Check placeholder name + for arg in args: + if isinstance(arg.op, tvm.te.PlaceholderOp): + if ( + workload_key + and workload_key in TASK_INPUT_BUFFER_TABLE + and arg.op.name in TASK_INPUT_BUFFER_TABLE[workload_key] + ): + tensor_input_map[arg] = arg.op.name + + # Case 1: Check specific tensor inputs + for func_name in TASK_INPUT_CHECK_FUNC_REGISTRY: + func = TASK_INPUT_CHECK_FUNC_REGISTRY[func_name] + tensor_input_map.update(func(args)) + + return tensor_input_map + + +def prepare_runner_args(inp, build_res): + """This function prepares the pre-defined arguments in `TASK_INPUT_BUFFER_TABLE` for local/rpc + runner in main process + + Parameters + ---------- + inp : MeasureInput + Measure input to be measured. + + build_res : BuildResult + Build result to be measured. + + Returns + ------- + List[Optional[numpy.ndarray]] : + List of arguments for running the program. If the argument does not have a pre-defined input + buffer, None is added to the list as a placeholder. 
+ + """ + # pylint: disable=import-outside-toplevel + from .search_task import get_task_input_buffer # lazily import to avoid recursive dependency + + task_input_names = inp.task.task_input_names + tensor_input_map = prepare_input_map(build_res.args, inp.task.workload_key) + if not task_input_names: + tensor_input_map = {} + args = [] + task_inputs_count = 0 + for arg in build_res.args: + if arg in tensor_input_map: + tensor_name = tensor_input_map[arg] + if tensor_name in task_input_names: + task_input_buffer = get_task_input_buffer(inp.task.workload_key, tensor_name) + # convert tvm.NDArray to picklable numpy.ndarray + args.append(task_input_buffer.numpy()) + task_inputs_count += 1 + else: + raise ValueError( + f"{tensor_name} not found in task_inputs, " + f"should provide with `SearchTask(..., task_inputs={{...}})`" + ) + else: + args.append(None) + if task_inputs_count != len(task_input_names): + raise RuntimeError("task_inputs not fully matched, check if there's any unexpected error") + return args + + +def _timed_eval_func( + inp_serialized, + build_res, + args, + number, + repeat, + min_repeat_ms, + cooldown_interval, + enable_cpu_cache_flush, + verbose, + device, +): + inp = MeasureInput.deserialize(inp_serialized) + tic = time.time() + error_no = 0 + error_msg = None + try: + func = module.load_module(build_res.filename) + dev = ndarray.device(str(inp.task.target), device) + # Limitation: + # We can not get PackFunction directly in the remote mode as it is wrapped + # under the std::function. We could lift the restriction later once we fold + # the PackedFunc as an object. Currently, we pass function name to work + # around it. 
@tvm._ffi.register_func("auto_scheduler.local_runner.run")
def local_run(
    inputs,
    build_results,
    timeout=10,
    number=3,
    repeat=1,
    min_repeat_ms=0,
    cooldown_interval=0,
    enable_cpu_cache_flush=False,
    verbose=1,
    device=0,
):
    """Run function of LocalRunner to test the performance of the input BuildResults.

    Parameters
    ----------
    inputs : List[MeasureInput]
        The MeasureInputs to be measured.
    build_results : List[BuildResult]
        The BuildResults to be measured.
    timeout : int = 10
        The timeout limit (in seconds) for each run.
    number : int = 3
        Number of runs averaged into one `repeat` of measurement.
    repeat : int = 1
        Number of repeated measurements; an extra warm-up run is discarded.
    min_repeat_ms : int = 0
        Minimum duration of one `repeat` in milliseconds; `number` is raised
        automatically until one `repeat` takes at least this long.
    cooldown_interval : float = 0
        Cool-down interval between two measurements in seconds.
    enable_cpu_cache_flush : bool = False
        Whether to flush CPU cache between repeated measurements (CPU tasks
        only; pair with `number=1` for the option to be effective).
    verbose : int = 1
        Verbosity level. 0 for silent, 1 to print progress markers.
    device : int = 0
        Which device to run on if multiple are available.

    Returns
    -------
    res : List[MeasureResult]
        The measure results of these MeasureInputs.
    """
    assert len(inputs) == len(build_results), "Measure input size should be equal to build results"

    worker = PopenWorker()
    measure_results = []
    for inp, build_res in zip(inputs, build_results):
        if build_res.error_no != 0:
            # The build already failed; propagate its error without running.
            outcome = (
                (MAX_FLOAT,),
                build_res.error_no,
                build_res.error_msg,
                build_res.time_cost,
                time.time(),
            )
        else:
            runner_args = prepare_runner_args(inp, build_res)
            outcome = call_func_with_timeout(
                worker,
                timeout,
                _timed_eval_func,
                args=(
                    inp.serialize(),
                    build_res,
                    runner_args,
                    number,
                    repeat,
                    min_repeat_ms,
                    cooldown_interval,
                    enable_cpu_cache_flush,
                    verbose,
                    device,
                ),
            )
            if isinstance(outcome, TimeoutError):
                if verbose >= 1:
                    print("*T", end="", flush=True)  # Run timeout
                outcome = (
                    (MAX_FLOAT,),
                    MeasureErrorNo.RUN_TIMEOUT,
                    None,
                    build_res.time_cost + timeout,
                    time.time(),
                )
            elif isinstance(outcome, Exception):
                if verbose >= 1:
                    print("*E", end="", flush=True)  # Run error
                outcome = (
                    (MAX_FLOAT,),
                    MeasureErrorNo.RUNTIME_DEVICE,
                    str(outcome),
                    build_res.time_cost + timeout,
                    time.time(),
                )

        measure_results.append(MeasureResult(*outcome))

    if verbose >= 1:
        print("", flush=True)

    return measure_results
Currently, we pass function name to work + # around it. + f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else "" + time_f = func.time_evaluator( + func.entry_name, + dev, + number=number, + repeat=repeat, + min_repeat_ms=min_repeat_ms, + f_preproc=f_prepare, + ) + # pylint: disable=broad-except + except Exception: + costs = (MAX_FLOAT,) + error_no = MeasureErrorNo.COMPILE_DEVICE + error_msg = make_traceback_info() + + if error_no == 0: + try: + stream = dev.create_raw_stream() + dev.set_raw_stream(stream) + random_fill = remote.get_function("tvm.contrib.random.random_fill") + assert ( + random_fill + ), "Please make sure USE_RANDOM is ON in the config.cmake on the remote devices" + + assert len(args) == len(build_res.args) + loc_args = [] + # pylint: disable=consider-using-enumerate + for idx in range(len(args)): + if args[idx] is None: + build_res_arg = build_res.args[idx] + empty_array = ndarray.empty( + get_const_tuple(build_res_arg.shape), build_res_arg.dtype, dev + ) + random_fill(empty_array) + loc_args.append(empty_array) + else: + loc_args.append(ndarray.array(args[idx], dev)) + dev.sync() + + # First run for check that the kernel is correct + func.entry_func(*loc_args) + dev.sync() + + costs = time_f(*loc_args).results + + # clean up remote files + remote.remove(build_res.filename) + remote.remove(os.path.splitext(build_res.filename)[0] + ".so") + remote.remove("") + dev.free_raw_stream(stream) + # pylint: disable=broad-except + except Exception: + dev.free_raw_stream(stream) + costs = (MAX_FLOAT,) + error_no = MeasureErrorNo.RUNTIME_DEVICE + error_msg = make_traceback_info() + + shutil.rmtree(os.path.dirname(build_res.filename)) + toc = time.time() + + time.sleep(cooldown_interval) + if verbose >= 1: + if error_no == MeasureErrorNo.NO_ERROR: + print("*", end="") + else: + print("*E", end="") # Run error + + return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc + + +def _rpc_run_worker(args): + """Function to be ran 
@tvm._ffi.register_func("auto_scheduler.rpc_runner.run")
def rpc_runner_run(
    inputs,
    build_results,
    key,
    host,
    port,
    priority=1,
    n_parallel=1,
    timeout=10,
    number=3,
    repeat=1,
    min_repeat_ms=0,
    cooldown_interval=0.0,
    enable_cpu_cache_flush=False,
    verbose=1,
    device=0,
):
    """Run function of RPCRunner to test the performance of the input BuildResults.

    Parameters
    ----------
    inputs : List[MeasureInput]
        The MeasureInputs to be measured.
    build_results : List[BuildResult]
        The BuildResults to be measured.
    key : str
        The key of the device registered in the RPC tracker.
    host : str
        The host address of the RPC Tracker.
    port : int
        The port of the RPC Tracker.
    priority : int = 1
        The priority of this run request, larger is more prior.
    n_parallel : int = 1
        The number of tasks run in parallel.
    timeout : int = 10
        The timeout limit (in seconds) for each run.
    number : int = 3
        Number of runs averaged into one `repeat` of measurement.
    repeat : int = 1
        Number of repeated measurements; an extra warm-up run is discarded.
    min_repeat_ms : int = 0
        Minimum duration of one `repeat` in milliseconds; `number` is raised
        automatically until one `repeat` takes at least this long.
    cooldown_interval : float = 0.0
        Cool-down interval between two measurements in seconds.
    enable_cpu_cache_flush : bool = False
        Whether to flush CPU cache between repeated measurements (CPU tasks
        only; pair with `number=1` for the option to be effective).
    verbose : int = 1
        Verbosity level. 0 for silent, 1 to print progress markers.
    device : int = 0
        Which device to run on if multiple are available.

    Returns
    -------
    res : List[MeasureResult]
        The measure results of these MeasureInputs.
    """
    assert len(inputs) == len(build_results), "Measure input size should be equal to build results"

    # One argument tuple per (input, build result) pair for _rpc_run_worker.
    worker_args = [
        (
            inp.serialize(),
            build_res,
            prepare_runner_args(inp, build_res),
            key,
            host,
            port,
            priority,
            timeout,
            number,
            repeat,
            min_repeat_ms,
            cooldown_interval,
            enable_cpu_cache_flush,
            verbose,
            device,
        )
        for inp, build_res in zip(inputs, build_results)
    ]

    # This pool is not doing computationally intensive work, so we can use threads
    executor = PopenPoolExecutor(n_parallel)
    mapped = executor.map_with_error_catching(_rpc_run_worker, worker_args)

    results = []
    for idx, outcome in enumerate(mapped):
        if outcome.status == StatusKind.COMPLETE:
            results.append(MeasureResult(*outcome.value))
            continue
        # The only other status the pool reports here is a timeout.
        assert outcome.status == StatusKind.TIMEOUT
        if verbose >= 1:
            print("*T", end="")  # Run timeout
        results.append(
            MeasureResult(
                (MAX_FLOAT,),
                MeasureErrorNo.RUN_TIMEOUT,
                None,
                build_results[idx].time_cost + timeout,
                time.time(),
            )
        )

    if verbose >= 1:
        print("")

    return results
@tvm._ffi.register_object("auto_scheduler.RecordReader")
class RecordReader(Object):
    """
    Reader of the json log file.

    Parameters
    ----------
    filename : str
        File name for this reader to load log from.
    """

    def __init__(self, filename):
        # Only warn (don't raise) on a missing file: the C++ reader handles it.
        if not os.path.exists(filename):
            logger.warning("%s does not exist!", filename)
        # a set to prevent print duplicated message
        self.messages = set()
        # Bind this Python object to the underlying C++ RecordReader via FFI.
        self.__init_handle_by_constructor__(_ffi_api.RecordReader, filename)

    def check_workload_key(self, inputs):
        """Check and throw warnings for records with old format workload key.

        Parameters
        ----------
        inputs: List[MeasureInput]
            The measure inputs to be checked.

        Notes
        -----
        This checker could be deprecated in the future.
        """
        for inp in inputs:
            _, args = decode_workload_key(inp.task.workload_key)
            # args is None for workload keys that are not decodable; skip them.
            if args is None:
                continue
            # An empty args list marks the pre-#7317 (old format) workload key.
            if not args:
                msg = (
                    "MeasureInput with old format workload key %s should be updated "
                    "using the script from https://github.com/apache/tvm/pull/7317."
                    % inp.task.workload_key
                )
                if msg not in self.messages:
                    self.messages.add(msg)
                    logger.warning(msg)

    def read_lines(self, max_lines=None, skip_lines=0):
        """Read multiple lines from the log file.

        Parameters
        ----------
        max_lines : Optional[int]
            The maximum number of lines. None to read all lines.
        skip_lines : int = 0
            Skip the first n lines.

        Returns
        -------
        inputs : List[auto_scheduler.measure.MeasureInput]
            The MeasureInputs loaded from the log file.
        results : List[auto_scheduler.measure.MeasureResult]
            The MeasureResults loaded from the log file.

        Notes
        -----
        Some unimportant and expensive fields in the returned MeasureInput are not deserialized
        for faster read speed (e.g. input.task.compute_dag, input.state.stages).
        If you want to use them, you can call the :code:`recover_measure_input` below
        to rebuild these fields.
        """
        # -1 tells the FFI reader to read all remaining lines.
        inputs, results = _ffi_api.RecordReaderReadLines(
            self, max_lines if max_lines else -1, skip_lines
        )
        self.check_workload_key(inputs)
        return inputs, results

    def __iter__(self):
        # Stream records one at a time; an empty return marks end-of-file.
        while True:
            ret = _ffi_api.RecordReaderReadNext(self)
            if not ret:
                break
            self.check_workload_key([ret[0]])
            yield ret[0], ret[1]  # (input, result)
+ """ + return _ffi_api.ReadMeasureRecord(record) + + +def dump_record_to_string(inp, res): + """ + Dump the measure record to a string. + + Parameters + ---------- + inp: MeasureInput + The measure input. + + res: MeasureResult + The measure result. + + Returns + ------- + ret: str + The dumped string. + """ + return _ffi_api.WriteMeasureRecords(inp, res) + + +def load_records(filename): + """ + Load measurement records from a file. + + Parameters + ---------- + filename : str + File name to load log from. + + Returns + ------- + logs : List[auto_scheduler.measure.MeasureInput, auto_scheduler.measure.MeasureResult] + + Notes + ----- + Some unimportant and expensive fields in the returned MeasureInput are not deserialized + for faster read speed (e.g., input.task.compute_dag, input.state.stages). + If you want to use them, you can call the :code:`recover_measure_input` below + to rebuild these fields. + """ + return zip(*RecordReader(filename).read_lines()) + + +def save_records(filename, inputs, results): + """ + Append measure records to file. + + Parameters + ---------- + filename : str + File name to write log to. + inputs: List[MeasureInputs] + The MeasureInputs to be written. + results: List[MeasureResults] + The MeasureResults to be written. + """ + dirname = os.path.dirname(os.path.abspath(filename)) + if not os.path.exists(dirname): + os.makedirs(dirname) + _ffi_api.SaveRecords(filename, inputs, results) + + +def load_best_record(filename, workload_key=None, target=None, include_compatible=False): + """Return the best measurement pair form a log file. This may return none results if + there is no legal measure pair with the specified workload_key/target found from the log file. + + Parameters + ---------- + filename : str + File name to load log from. + workload_key : Optional[str] + The workload key of the compute declaration. + With `None`, this returns the best measure pair of all workloads. + target : Optional[tvm.target.Target] + The target device. 
def distill_record_file(in_file, out_file):
    """
    Pick the best entries from a record file and store them to another file.
    This function distills the useful log entries from a large log file.
    If out_file already exists, the best entries from both
    in_file and out_file will be saved.

    Parameters
    ----------
    in_file: str
        The filename of input
    out_file: str or file
        The filename of output
    """
    # pylint: disable=import-outside-toplevel
    from .dispatcher import ApplyHistoryBest

    context = load_records(in_file)

    # Ensure the output directory exists.  exist_ok=True avoids the TOCTOU race
    # between an exists() check and makedirs().
    dirname = os.path.dirname(os.path.abspath(out_file))
    os.makedirs(dirname, exist_ok=True)

    # Merge in the existing output so previously distilled entries survive.
    if os.path.isfile(out_file):
        out_context = load_records(out_file)
        context = itertools.chain(context, out_context)

    def measure_input_str_key(inp):
        # Serialized MeasureInput uniquely identifies a (task, state) pair.
        return _ffi_api.SerializeMeasureInput(inp)

    # Dict[target key,
    #      Dict[workload hash,
    #           Dict[workload args, (cost, (MeasureInput, MeasureResult))]]]
    # Full type: Dict[str, Dict[str, Dict[Tuple, Tuple[float, Tuple[Measureinput, MeasureResult]]]]]
    best_records = {}

    for inp, res in context:
        if res.error_no != 0:
            continue

        # Keep the best record for each target and workload.
        costs = [x.value for x in res.costs if isinstance(x, tvm.tir.expr.FloatImm)]
        cost = np.mean(costs)
        for k in inp.task.target.keys:
            entry, _, workload_args = ApplyHistoryBest.get_workload_entry(
                best_records, k, inp.task.workload_key
            )
            if workload_args not in entry or cost < entry[workload_args][0]:
                entry[workload_args] = (cost, (inp, res))

    # Remove duplications by multiple target keys.
    out_records = {}
    for target_entry in best_records.values():
        for workload_entry in target_entry.values():
            for _, (inp, res) in workload_entry.values():
                out_records[measure_input_str_key(inp)] = (inp, res)

    inputs = []
    results = []
    for inp, res in out_records.values():
        inputs.append(inp)
        results.append(res)

    # Truncate any existing file, closing the handle immediately instead of
    # leaking it (the original `open(out_file, "w")` never closed the file);
    # save_records then appends the distilled entries.
    open(out_file, "w").close()
    save_records(out_file, inputs, results)
    logger.info("Extract %d best records from %s to %s", len(inputs), in_file, out_file)
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-variable,invalid-name + +""" +Integrate auto_scheduler into relay. It implements the following items: +1. Extract search tasks from a relay program +2. Provide auto-scheduling for all TOPI compute functions +""" + +import json +import logging +import threading +import traceback + +import tvm +from tvm import autotvm, transform +from tvm._ffi.base import TVMError +from tvm.ir.transform import PassContext +from tvm.runtime import convert_to_object +from tvm.target import Target +from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor +from tvm.tir import Reduce +from tvm.tir import expr as _expr + +from . 
def call_all_topi_funcs(mod, params, target, error_list, opt_level=3):
    """Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
    # pylint: disable=import-outside-toplevel
    from tvm import relay

    # Turn off AutoTVM config not found warnings
    old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True

    # Compile under a pass context that enables auto_scheduler tracing and
    # disables the layout-rewrite pass (tasks are extracted pre-rewrite).
    with transform.PassContext(
        opt_level=opt_level,
        config={
            "relay.backend.use_auto_scheduler": True,
        },
        disabled_pass={"AutoSchedulerLayoutRewrite"},
    ):
        compiler = relay.vm.VMCompiler()
        if params:
            compiler.set_params(params)
        mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
        try:
            compiler.lower(mod, target)
        except TVMError:
            # Collect the traceback instead of raising: this runs in a worker
            # thread, so errors are reported back through `error_list`.
            error_list.append(f"{traceback.format_exc()}")
        finally:
            # Always restore the global AutoTVM silence flag.
            autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
+ dump_workload_to_dag_log: Optional[str] + A file to dump an association between the workload keys and the actual DAG + opt_level : Optional[int] + The optimization level of the task extractions. + other_targets: Optional[List[tvm.target.Target]] + Other targets for call_all_topi_funcs, e.g., cutlass target. + + Returns + ------- + tasks: List[SearchTask] + The tasks in this network + weights: List[int] + The weight (i.e. the number of appearance) of extracted tasks + """ + # pylint: disable=import-outside-toplevel + target, target_host = Target.canon_target_and_host(target, target_host) + + # Run the compiler to collect all TOPI calls during compilation. + env = TracingEnvironment( + TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY + ) + + dispatch_ctx = DispatchContext.current + old_verbose = dispatch_ctx.verbose + dispatch_ctx.verbose = 0 + + targets = [target] + if other_targets is not None: + targets += other_targets + errors = [] + with env: + # Wrap build call in a new thread to avoid the conflict + # between python's multiprocessing and tvm's thread pool + build_thread = threading.Thread( + target=call_all_topi_funcs, args=(mod, params, targets, errors, opt_level) + ) + build_thread.start() + build_thread.join() + + if errors: + error_strings = ["Task extraction had the following errors:"] + errors + raise TVMError("\n".join(error_strings)) + + dispatch_ctx.verbose = old_verbose + + # create search tasks + tasks = [] + weights = [] + for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items(): + tasks.append( + SearchTask( + workload_key=wkl_key, + target=target, + hardware_params=hardware_params, + # When auto scheduler is used in end to end network, try to apply layout rewrite + # to improve the overall performance + layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True), + task_inputs=( + env.wkl_key_to_input_names[wkl_key] + if wkl_key in env.wkl_key_to_input_names + else None + ), + 
class TracingEnvironment:
    """Global environment for tracing all topi function calls."""

    # The active tracing environment, or None when not inside a `with` block.
    current = None

    def __init__(self, tracing_mode):
        # One of the TracingMode constants.
        self.tracing_mode = tracing_mode
        self.relay_disable_build_cache = "false"
        # Map: function name -> workload key.
        self.func_name_to_wkl_key = {}
        # Map: workload key -> (weight, set of function names).
        self.wkl_key_to_weight = {}
        # Map: workload key -> list of special task input names.
        self.wkl_key_to_input_names = {}

    def __enter__(self):
        TracingEnvironment.current = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        TracingEnvironment.current = None

    def add_workload_key(self, func_name, workload_key):
        """Record one occurrence of a search task's workload key.

        Parameters
        ----------
        func_name: str
            The function name of the task.

        workload_key: str
            The workload key of a task.
        """
        self.func_name_to_wkl_key[func_name] = workload_key
        # Bump the weight and remember which function produced this workload.
        count, producer_names = self.wkl_key_to_weight.setdefault(workload_key, (0, set()))
        producer_names.add(func_name)
        self.wkl_key_to_weight[workload_key] = (count + 1, producer_names)

    def add_workload_input_names(self, workload_key, input_names):
        """Add special task inputs to this workload.

        Parameters
        ----------
        workload_key : str
            The workload key of a task.

        input_names : List[str]
            A list of input names.
        """
        self.wkl_key_to_input_names[workload_key] = input_names
+ if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]): + return ([], False, False) + + return (io_tensors, len(layout_free_ops) > 0, has_complex_op) + + +@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute") +def auto_schedule_topi(func_name, outs): + """Use auto-scheduler to schedule any topi compute function. + + Note: This is used internally for relay integration. Do + not use this as a general user-facing API. + + Parameters + ---------- + func_name: str + The name of the function being scheduled. + + outs: List[Tensor] + The output tensors of topi compute functions + + Returns + ------- + sch: Optional[te.Schedule] + A tuned schedule or none (if not tuned) in the final build mode; + None in the tracing mode so that the fallback topi schedule will be used. + """ + + # pylint: disable=import-outside-toplevel + from tvm.auto_scheduler.measure import ( # lazily import to avoid recursive dependency + prepare_input_map, + ) + + io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs) + if not io_tensors: # The compute includes dynamic shapes which are not supported yet. 
+ return None + + try: + dag = ComputeDAG(io_tensors) + except tvm.error.TVMError as err: + logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err)) + return None + + workload_key = dag.workload_key() + key = register_workload_tensors(workload_key, io_tensors) + target = tvm.target.Target.current() + + dispatch_ctx = DispatchContext.current + state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name) + schedule = None + + env = TracingEnvironment.current + if env is None: + # in the final build mode + if state is None: + return None + + schedule, _ = dag.apply_steps_from_state(state) + return schedule + + if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]: + # in the task extraction mode + if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK: + env.add_workload_key(func_name, key) + input_map = prepare_input_map(io_tensors, workload_key) + if input_map: + env.add_workload_input_names(key, list(input_map.values())) + elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE: + # in prepare_layout_rewrite mode + if ( + LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE + and has_layout_free + ): + if state is None: + return None + + # rewrite the layout and update the context for the new dag + new_dag = dag.rewrite_layout_from_state(state) + new_key = new_dag.workload_key() + if new_key != key: + dispatch_ctx.update(target, new_key, state) + else: + raise ValueError("Invalid tracing mode: " + env.tracing_mode) + + return schedule + + +@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights") +def te_compiler_update_weights(function_weights): + """A callback for updating the weights of extracted tasks. When using the TE compiler + that avoids compiling the same function multiple times by caching, all extracted tasks + have weight 1, so the TE compiler invokes this callback at the end. 
In this case, + we override existing weights with the use_count in TE compiler cache. + + Parameters + ---------- + function_weights: Dict[str, int] + Mapping from function names to their weights. + """ + env = TracingEnvironment.current + if env is not None: + # Override this map with the weights in the TE compiler. + env.wkl_key_to_weight = {} + + for func_name, weight in function_weights.items(): + # If the function name is not in the map, then it means we are not interested in + # this function during task extraction (e.g., a function without reduction). + if func_name not in env.func_name_to_wkl_key: + continue + + workload_key = env.func_name_to_wkl_key[func_name] + if workload_key not in env.wkl_key_to_weight: + env.wkl_key_to_weight[workload_key] = (0, set()) + + # Note that the function appears multiple times in a model will be renamed + # to make sure function names are unique, so we use the workload key generated + # from the function's TE compute to determine their weights. + old_weight, func_names = env.wkl_key_to_weight[workload_key] + func_names.add(func_name) + env.wkl_key_to_weight[workload_key] = (old_weight + weight, func_names) + + +def tensor_no_check_call(self, *indices): + """An indexing function without any check. + This is the same as `tvm.te.Tensor::__call__` except that the safety + check is removed. + """ + indices = convert_to_object(indices) + args = [] + for x in indices: + if isinstance(x, _expr.PrimExpr): + args.append(x) + elif isinstance(x, _expr.IterVar): + args.append(x.var) + else: + raise ValueError("The indices must be expression") + + return _expr.ProducerLoad(self, args) + + +def remove_index_check(tensor): + """Remove the safety check in the indexing function for a tensor. + This is done by monkey patching its indexing function. + After removing the check, we are allowed to create a + temporary wrong IR and fix it later in other places. + + Parameters + ---------- + tensor: Tensor + The tensor to remove index check. 
+ """ + # Monkey patch the indexing function + tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor) + + +def rewrite_compute_body(compute_tensor, new_layout): + """Rewrite the body of a ComputeOp according to a new layout of a placeholder""" + op = compute_tensor.op + + # Get layout free placeholders + layout_free_placeholders = op.attrs["layout_free_placeholders"] + assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder" + placeholder_op = layout_free_placeholders[0].op + + # Rewrite the index expression in body + body = [] + for b in op.body: + body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b)) + op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body) + + num = op_node.num_outputs + outputs = tuple(op_node.output(i) for i in range(num)) + return outputs[0] if num == 1 else outputs + + +def rewrite_tensor_shape(tensor, shape): + """Rewrite the tensor shape""" + _ffi_api.RewriteTensorShape(tensor, shape) + + +def is_auto_scheduler_enabled(): + """Return whether the auto-scheduler is enabled. + + Parameters + ---------- + enabled: bool + Whether the auto-scheduler is enabled + """ + return PassContext.current().config.get( + "relay.backend.use_auto_scheduler", + False, + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/search_policy.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/search_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..4b12c031834ff7129bb88527fedad1b9a8c8d9e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/search_policy.py @@ -0,0 +1,275 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +The search policies of TVM auto-scheduler. + +The auto-scheduler constructs a search space according to the compute declaration. +It then randomly samples programs from the search space and uses evolutionary search with a +learned cost model to fine tune the sampled programs. +The final optimized programs are sent to actual hardware for measurement. +The above process is repeated until the auto-scheduler runs out of time budget. + +Reference: +L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor +Programs for Deep Learning." (OSDI 2020). +""" + +import random + +import tvm._ffi +from tvm.runtime import Object +from .cost_model import RandomModel +from . import _ffi_api + + +@tvm._ffi.register_object("auto_scheduler.SearchCallback") +class SearchCallback(Object): + """Callback function before or after search process""" + + +@tvm._ffi.register_object("auto_scheduler.PreloadMeasuredStates") +class PreloadMeasuredStates(SearchCallback): + """A SearchCallback to load measured states from the log file for a search policy. + + This can resume the state of the search policy: + - Making sure an already measured state in former searches will never be measured again. + - The history states can be used to speed up the search process(e.g. SketchPolicy uses + history states as starting point to perform Evolutionary Search). 
+ + Parameters + ---------- + filename : str + The name of the record file. + """ + + def __init__(self, filename): + self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename) + + +@tvm._ffi.register_object("auto_scheduler.PreloadCustomSketchRule") +class PreloadCustomSketchRule(SearchCallback): + """ + A SearchCallback for SketchSearchPolicy that allows users to add + custom sketch rule. + + Notes + ----- + This is an advanced feature. Make sure you're clear how it works and this should only be used + in SketchSearchPolicy. + + Parameters + ---------- + meet_condition_func: Callable + A function with `(policy, state, stage_id) -> int`. Should return one of the result + enumeration. + apply_func: Callable + A function with `(policy, state, stage_id) -> [[State, int], ...]`. + rule_name: str = "CustomSketchRule" + The name of this custom sketch rule. + """ + + # Result enumeration of the condition function. + PASS = 0 # Skip this rule and continue to try the next rules. + APPLY = 1 # Apply this rule and continue to try the next rules. + APPLY_AND_SKIP_REST = 2 # Apply this rule and skip the rest rules. + + def __init__(self, meet_condition_func, apply_func, rule_name="CustomSketchRule"): + self.__init_handle_by_constructor__( + _ffi_api.PreloadCustomSketchRule, meet_condition_func, apply_func, rule_name + ) + + +@tvm._ffi.register_object("auto_scheduler.SearchPolicy") +class SearchPolicy(Object): + """The base class of search policies.""" + + def continue_search_one_round(self, num_measure, measurer): + """ + Continue the search by doing an additional search round. 
+ + Parameters + ---------- + num_measure: int + The number of programs to measure in this round + measurer: ProgramMeasurer + The program measurer to measure programs + + Returns + ------- + inputs: List[MeasureInput] + The inputs of measurments in this search round + results: List[MeasureResult] + The results of measurments in this search round + """ + return _ffi_api.SearchPolicyContinueSearchOneRound(self, num_measure, measurer) + + def set_verbose(self, verbose): + """ + Set the verbosity level of the search policy. + + Parameters + ---------- + verbose: int + The verbosity level + """ + return _ffi_api.SearchPolicySetVerbose(self, verbose) + + +@tvm._ffi.register_object("auto_scheduler.EmptyPolicy") +class EmptyPolicy(SearchPolicy): + """A simple example of the search policy which always returns + the initial naive schedule (state). + + Parameters + ---------- + task : SearchTask + The SearchTask for the computation declaration. + init_search_callbacks : Optional[List[SearchCallback]] + Callback functions called before the search process. + """ + + def __init__(self, task, init_search_callbacks=None): + self.__init_handle_by_constructor__(_ffi_api.EmptyPolicy, task, init_search_callbacks) + + +@tvm._ffi.register_object("auto_scheduler.SketchPolicy") +class SketchPolicy(SearchPolicy): + """The search policy that searches in a hierarchical search space defined by sketches. + The policy randomly samples programs from the space defined by sketches and use evolutionary + search to fine-tune them. + + Parameters + ---------- + task : SearchTask + The SearchTask for the computation declaration. + program_cost_model : CostModel = RandomModel() + The cost model to estimate the complete schedules. + params : Optional[Dict[str, Any]] + Parameters of the search policy. + See `src/auto_scheduler/search_policy/sketch_search_policy.h` for the definitions. + See `DEFAULT_PARAMS` below to find the default values. + seed : Optional[int] + Random seed. 
+ verbose : int = 1 + Verbosity level. 0 for silent, 1 to output information during schedule search. + init_search_callbacks : Optional[List[SearchCallback]] + Callback functions called before the search process, usually used to do extra + initializations. + Possible callbacks: + + - auto_scheduler.PreloadMeasuredStates + - auto_scheduler.PreloadCustomSketchRule + """ + + DEFAULT_PARAMS = { + "eps_greedy": 0.05, + "retry_search_one_round_on_empty": 1, + "sample_init_min_population": 50, + "sample_init_use_measured_ratio": 0.2, + "evolutionary_search_population": 2048, + "evolutionary_search_num_iters": 4, + "evolutionary_search_mutation_prob": 0.85, + "cpu_multi_level_tiling_structure": "SSRSRS", + "gpu_multi_level_tiling_structure": "SSSRRSRS", + # Notice: the default thread bind policy of GPU assumes the tiling structure to have at + # least 3 spatial tiling levels in outermost + "max_innermost_split_factor": 64, + "max_vectorize_size": 16, + "disable_change_compute_location": 0, + } + + def __init__( + self, + task, + program_cost_model=RandomModel(), + params=None, + seed=None, + verbose=1, + init_search_callbacks=None, + ): + if params is None: + params = SketchPolicy.DEFAULT_PARAMS + else: + for key, value in SketchPolicy.DEFAULT_PARAMS.items(): + if key not in params: + params[key] = value + + self.__init_handle_by_constructor__( + _ffi_api.SketchPolicy, + task, + program_cost_model, + params, + seed or random.randint(1, 1 << 30), + verbose, + init_search_callbacks, + ) + + def generate_sketches(self, print_for_debug=False): + """Generate the sketches. + This python interface is mainly used for debugging and testing. + The actual search is all done in c++. + + Parameters + ---------- + print_for_debug : bool = False + Whether print out the sketches for debug. + + Returns + ------- + sketches : List[State] + The generated sketches of this search task. 
+ """ + sketches = _ffi_api.SketchPolicyGenerateSketches(self) + if print_for_debug: + for i, s in enumerate(sketches): + print("=" * 20 + f" {i} " + "=" * 20) + print(s) + return sketches + + def sample_initial_population(self): + """Sample initial population. + This python interface is mainly used for debugging and testing. + The actual search is all done in c++. + + Returns + ------- + states: List[State] + The sampled states + """ + states = _ffi_api.SketchPolicySampleInitialPopulation(self) + return states + + def evolutionary_search(self, init_populations, out_size): + """Perform evolutionary search. + This python interface is mainly used for debugging and testing. + The actual search is all done in c++. + + Parameters + ---------- + init_populations: List[State] + The initial population states + out_size : int + The size of generated states + + Returns + ------- + states: List[State] + The generated states + """ + states = _ffi_api.SketchPolicyEvolutionarySearch(self, init_populations, out_size) + return states diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/search_task.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/search_task.py new file mode 100644 index 0000000000000000000000000000000000000000..767baf916d58f0aa0c87537fd4fcf617275737d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/search_task.py @@ -0,0 +1,649 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" The definiton of SearchTask """ + +import json + +import os +import logging +import numpy as np + +import tvm._ffi +from tvm.runtime import Object, ndarray + +from tvm.driver.build_module import build +from tvm.target import Target +from .measure import LocalBuilder, LocalRunner +from .measure_record import load_best_record +from .workload_registry import make_workload_key +from .compute_dag import ComputeDAG, LayoutRewriteOption +from .cost_model import XGBModel +from .search_policy import SketchPolicy +from .workload_registry import WORKLOAD_FUNC_REGISTRY, register_workload_tensors +from . import _ffi_api + +# pylint: disable=invalid-name +logger = logging.getLogger("auto_scheduler") + + +@tvm._ffi.register_object("auto_scheduler.HardwareParams") +class HardwareParams(Object): + """The parameters of target hardware used to guide the search policy. + + When a parameter isn't provided, it will instead use the + current machine's default value if target is specified. + TODO(jcf94): This is considered to be merged with the new Target specification: + https://discuss.tvm.apache.org/t/rfc-tvm-target-specification/6844 + Parameters + ---------- + num_cores : int, optional + The number of device cores. + vector_unit_bytes : int, optional + The width of vector units in bytes. + cache_line_bytes : int, optional + The size of cache line in bytes. + max_shared_memory_per_block : int, optional + The max shared memory per block in bytes. + max_local_memory_per_block : int, optional + The max local memory per block in bytes. 
+ max_threads_per_block : int, optional + The max number of threads per block. + max_vthread_extent : int, optional + The max vthread extent. + warp_size : int, optional + The thread numbers of a warp. + target : str or Target, optional + The compilation target. Used to determine default values if provided. + target_host : str or Target, optional + The compilation target host. Used to determine default values if provided. + """ + + def __init__( + self, + num_cores=None, + vector_unit_bytes=None, + cache_line_bytes=None, + max_shared_memory_per_block=None, + max_local_memory_per_block=None, + max_threads_per_block=None, + max_vthread_extent=None, + warp_size=None, + target=None, + target_host=None, + ): + # If target is provided, get the default paramters for this machine. + if target is not None: + if isinstance(target, str): + target = tvm.target.Target(target) + if isinstance(target_host, str): + target_host = tvm.target.Target(target_host) + default_params = _ffi_api.GetDefaultHardwareParams(target, target_host) + + if num_cores is None: + num_cores = default_params.num_cores + if vector_unit_bytes is None: + vector_unit_bytes = default_params.vector_unit_bytes + if cache_line_bytes is None: + cache_line_bytes = default_params.cache_line_bytes + if max_shared_memory_per_block is None: + max_shared_memory_per_block = default_params.max_shared_memory_per_block + if max_local_memory_per_block is None: + max_local_memory_per_block = default_params.max_local_memory_per_block + if max_threads_per_block is None: + max_threads_per_block = default_params.max_threads_per_block + if max_vthread_extent is None: + max_vthread_extent = default_params.max_vthread_extent + if warp_size is None: + warp_size = default_params.warp_size + + self.__init_handle_by_constructor__( + _ffi_api.HardwareParams, + num_cores, + vector_unit_bytes, + cache_line_bytes, + max_shared_memory_per_block, + max_local_memory_per_block, + max_threads_per_block, + max_vthread_extent, + warp_size, + ) + 
+ def __str__(self): + """Pretty printing for hardware parameter configuration.""" + format_str = ( + "HardwareParams:\n" + f" num_cores: {self.num_cores}\n" + f" vector_unit_bytes: {self.vector_unit_bytes}\n" + f" cache_line_bytes: {self.cache_line_bytes}\n" + f" max_shared_memory_per_block: {self.max_shared_memory_per_block}\n" + f" max_local_memory_per_block: {self.max_local_memory_per_block}\n" + f" max_threads_per_block: {self.max_threads_per_block}\n" + f" max_vthread_extent: {self.max_vthread_extent}\n" + f" warp_size: {self.warp_size}\n" + ) + return format_str + + +@tvm._ffi.register_object("auto_scheduler.TuningOptions") +class TuningOptions(Object): + """This controls the options of performance tuning. + + Parameters + ---------- + num_measure_trials: int = 0 + The number of measurement trials. + The search policy measures `num_measure_trials` schedules in total and returns the best one + among them. + With `num_measure_trials` == 0, the policy will do the schedule search but won't involve + measurement. This can be used to get a runnable schedule quickly without auto-tuning. + early_stopping: Optional[int] + Stop the tuning early if getting no improvement after n measurements. + num_measures_per_round: int = 64 + The number of schedules to be measured at each search round. + The whole schedule search process will try a total number of `num_measure_trials` in several + rounds. + verbose: int = 1 + Verbosity level. 0 for silent, 1 to output information during schedule search. + builder: Union[ProgramBuilder, str] = 'local' + ProgramBuilder which builds the program. + runner: Union[ProgramRunner, str] = 'local' + ProgramRunner which runs the program and measures time costs. + measure_callbacks: Optional[List[MeasureCallback]] + Callback functions called after each measurement. 
+ Candidates: + - auto_scheduler.RecordToFile + """ + + def __init__( + self, + num_measure_trials=0, + early_stopping=None, + num_measures_per_round=64, + verbose=1, + builder="local", + runner="local", + measure_callbacks=None, + ): + if isinstance(builder, str): + if builder == "local": + builder = LocalBuilder() + else: + raise ValueError("Invalid builder: " + builder) + elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder): + raise ValueError( + "Invalid builder: " + + builder + + " . TuningOptions expects a ProgramBuilder or string." + ) + + if isinstance(runner, str): + if runner == "local": + runner = LocalRunner() + else: + raise ValueError("Invalid runner: " + runner) + elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner): + raise ValueError( + "Invalid runner: " + runner + " . TuningOptions expects a ProgramRunner or string." + ) + + self.__init_handle_by_constructor__( + _ffi_api.TuningOptions, + num_measure_trials, + early_stopping or -1, + num_measures_per_round, + verbose, + builder, + runner, + measure_callbacks, + ) + + +# The map stores special registered buffer for measurement. +# This can be used for sparse workloads when we cannot use random tensors for measurment. +# { +# "workload_key_0": { +# "task_input_0": Tensor(...), +# "task_input_1": Tensor(...) +# }, +# "workload_key_1": { +# "task_input_2": Tensor(...), +# "task_input_3": Tensor(...) +# }, +# ... +# } +TASK_INPUT_BUFFER_TABLE = {} + + +def _save_buffer_to_file(buffer_name, buffer_data): + """Save the current Tensor buffer to a numpy file. + + File name will be: {buffer_name}.{buffer_shape}_{buffer_data_type}.npy + """ + np_data = buffer_data.numpy() + + buffer_name += "." + for i in np_data.shape: + buffer_name += f"{i}_" + buffer_name += f"{np_data.dtype}.npy" + + np_data.tofile(buffer_name, " ") + + +def _try_load_buffer_from_file(buffer_name): + """Try to load buffer from a numpy file, if not found, return None. 
+ + File name has a same format as `_save_buffer_to_file`. + """ + filelist = os.listdir() + + for file in filelist: + if file.startswith(buffer_name + "."): + meta_info = file.split(".")[-2].split("_") + shape = [int(i) for i in meta_info[:-1]] + dtype = meta_info[-1] + buffer_data = np.fromfile(file, dtype=dtype, sep=" ") + buffer_data = buffer_data.reshape(shape) + return ndarray.array(buffer_data) + + return None + + +def register_task_input_buffer( + workload_key, input_name, input_data, overwrite=False, save_to_file=False +): + """Register special buffer for measurement. + + Parameters + ---------- + workload_key : str + The workload key of the SearchTask. + + input_name : str + The name of input buffer. + + input_data : tvm.nd.NDArray + The input Tensor data. + + overwrite : bool = False + Whether to overwrite the data if a name has already registered. + + save_to_file : bool = False + Whether to save the data to a local file as well. This can be reused to resume the last + tuning process. + + Returns + ------- + tvm.nd.NDArray + The actual registered Tensor data of this input_name. With `overwrite` set to False, will + return the original one if the name has already registered before. 
+ """ + global TASK_INPUT_BUFFER_TABLE + + if workload_key not in TASK_INPUT_BUFFER_TABLE: + TASK_INPUT_BUFFER_TABLE[workload_key] = {} + input_table = TASK_INPUT_BUFFER_TABLE[workload_key] + + if not overwrite: + if input_name not in input_table.keys(): + # Try to load buffer data from local file + tensor_from_file = _try_load_buffer_from_file(input_name) + if tensor_from_file: + input_table[input_name] = tensor_from_file + elif input_name in input_table.keys(): + raise RuntimeError( + "Tensor %s exists in TASK_INPUT_BUFFER_TABLE, %s" + % (input_name, "set overwrite to True or this Tensor will not be registered") + ) + + input_table[input_name] = input_data + if save_to_file: + _save_buffer_to_file(input_name, input_data) + return input_data + + +def get_task_input_buffer(workload_key, input_name): + """Get special buffer for measurement. + + The buffers are registered by `register_task_input_buffer`. + + Parameters + ---------- + workload_key : str + The workload key of the SearchTask. + + input_name : str + The name of input buffer. + + Returns + ------- + tvm.nd.NDArray + The registered input buffer. 
+ """ + global TASK_INPUT_BUFFER_TABLE + + if workload_key not in TASK_INPUT_BUFFER_TABLE: + TASK_INPUT_BUFFER_TABLE[workload_key] = {} + input_table = TASK_INPUT_BUFFER_TABLE[workload_key] + + if input_name not in input_table: + # Try to load buffer data from local file + tensor_from_file = _try_load_buffer_from_file(input_name) + if tensor_from_file: + input_table[input_name] = tensor_from_file + + # Then check for the default table, the input names extracted from a relay model will be + # stored here for we're not able to get the workload_key at that time + if input_name not in input_table: + input_table = TASK_INPUT_BUFFER_TABLE["default"] + + if input_name in input_table: + return input_table[input_name] + + raise ValueError( + f"{input_name} not found in TASK_INPUT_BUFFER_TABLE, " + f"should provide with `SearchTask(..., task_inputs={{...}})`" + ) + + +@tvm._ffi.register_object("auto_scheduler.SearchTask") +class SearchTask(Object): + """The computation information and hardware parameters for a schedule search task. + + Parameters + ---------- + func : Union[Function, str] + The function that returns the compute declaration Tensors. + Can be the a function or the function name. + args : Union[Tuple[Any, ...], List[Any]] + The args of the function. + compute_dag : ComputeDAG + The ComputeDAG for the corresponding compute declaration. + workload_key : str + The workload key for the corresponding compute declaration. + target : any target-like object, see Target.canon_target + The target device of this search task. + target_host : None or any target-like object, see Target.canon_target + The target host device of this search task. + hardware_params : Optional[HardwareParams] + Hardware parameters used in this search task. + layout_rewrite_option : Optional[LayoutRewriteOption] + The layout rewrite option used for measuring programs. If None, the default value will be + set depending on the specified target. 
+ Auto_scheduler will find a better schedule for the specified layout rewrite option. + The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone + op, and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a + network. + task_inputs : Union[Dict[str, tvm.nd.NDArray], List[str]] + A dict maps the input names to input tensors or a list of input names. + Some special Tensor used as inputs in program measuring. Usually we do not need to care + about it, but for special workloads like Sparse computation the Sparse Tensor input are + meaningful that we cannot use random input directly. + task_inputs_overwrite : bool = False + Whether to overwrite the data if a name has already in the global table. + task_inputs_save_to_file : bool = False + Whether to save the data to a local file as well. This can be reused to resume the last + tuning process. + desc: str = "" + The description string of this task. + + Examples + -------- + .. code-block:: python + + # We support two ways to create a search task + + # Way 1: create a task by a workload generation function. + # The `workload_func` is a function decorated by @auto_scheduler.register_workload + task = SearchTask(func=workload_func, args=args, target=target) + + # Way 2: create a task by a workload_key. + # The `workload_key` is a string, which can be either a hash key or a json-serialized + # tuple(func, args). 
+ task = SearchTask(workload_key=workload_key, target=target) + """ + + def __init__( + self, + func=None, + args=None, + compute_dag=None, + workload_key=None, + target=None, + target_host=None, + hardware_params=None, + layout_rewrite_option=None, + task_inputs=None, + task_inputs_overwrite=False, + task_inputs_save_to_file=False, + desc="", + ): + assert ( + func is not None or workload_key is not None + ), "Either a workload generation function or a workload key should be provided" + + if func is not None: + workload_key = make_workload_key(func, args) + if compute_dag is None: + compute_dag = ComputeDAG(workload_key) + + assert target is not None, "Must specify a target." + + target, target_host = Target.canon_target_and_host(target, target_host) + + if layout_rewrite_option is None: + layout_rewrite_option = LayoutRewriteOption.get_target_default(target) + + task_input_names = [] + if isinstance(task_inputs, list): + task_input_names = task_inputs + elif isinstance(task_inputs, dict): + for input_name in task_inputs: + register_task_input_buffer( + workload_key, + input_name, + task_inputs[input_name], + task_inputs_overwrite, + task_inputs_save_to_file, + ) + task_input_names.append(input_name) + elif task_inputs is not None: + raise ValueError("task_inputs should be a dict or a list.") + + self.__init_handle_by_constructor__( + _ffi_api.SearchTask, + compute_dag, + workload_key, + target, + target_host, + hardware_params, + layout_rewrite_option, + task_input_names, + desc, + ) + + def tune(self, tuning_options, search_policy=None, adaptive_training=False): + """Run auto scheduling search for a task + + Parameters + ---------- + tuning_options : TuningOptions + Tuning and measurement options. + search_policy : Optional[SearchPolicy] + The search policy to be used for schedule search. 
+ """ + if search_policy is None: + cost_model = XGBModel(adaptive_training=adaptive_training) + search_policy = SketchPolicy(self, cost_model) + + _ffi_api.AutoSchedule(search_policy, tuning_options) + + def apply_best(self, log_file, include_compatible=False, layout_rewrite_option=None): + """Apply the history best from a log file and return the schedule. + + Parameters + ---------- + log_file : str + The name of the log file. + include_compatible: bool + When set to True, all compatible records in the log file will be considered. + layout_rewrite_option : Optional[LayoutRewriteOption] + The layout rewrite option. + + + Returns + ------- + A `te.Schedule` and the a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`. + """ + inp, _ = load_best_record( + log_file, self.workload_key, include_compatible=include_compatible + ) + if inp is None: + raise RuntimeError( + f"Cannot find any valid schedule for {self.workload_key} in file {log_file}" + ) + + sch, args = self.compute_dag.apply_steps_from_state( + inp.state, layout_rewrite_option or self.layout_rewrite_option + ) + return sch, args + + def print_best(self, log_file, print_mode="schedule"): + """Print the best schedule as python schedule API code or CUDA source code. + + Parameters + ---------- + log_file : str + The name of the log file + print_mode: str + if "schedule", print the best schedule as python schedule API code. + if "cuda", print the best schedule as CUDA source code. 
+ + Returns + ------- + code: str + The best schedule code in python API or CUDA source code + """ + inp, _ = load_best_record(log_file, self.workload_key) + if inp is None: + raise RuntimeError( + f"Cannot find any valid schedule for {self.workload_key} in file {log_file}" + ) + + if print_mode == "schedule": + return self.compute_dag.print_python_code_from_state(inp.state) + if print_mode == "cuda": + assert self.target.kind.name == "cuda" + sch, args = self.compute_dag.apply_steps_from_state(inp.state) + func = build(sch, args, "cuda") + return func.imported_modules[0].get_source() + raise ValueError(f"Invalid print_mode: {print_mode}") + + def __getstate__(self): + self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host) + return { + "compute_dag": self.compute_dag, + "workload_key": self.workload_key, + "target": self.target, + "target_host": self.target_host, + "hardware_params": self.hardware_params, + "layout_rewrite_option": self.layout_rewrite_option, + "task_input_names": self.task_input_names, + "desc": self.desc, + } + + def __setstate__(self, state): + # Register the workload if needed + try: + workload = json.loads(state["workload_key"]) + except Exception: # pylint: disable=broad-except + raise RuntimeError(f"Invalid workload key {state['workload_key']}") + + # workload[0] is either the compute function name or the ComputeDAG hash. + # The compute functions are already registered when importing TVM, so here + # we only register the ComputeDAG workloads. If the same workload has + # already been registered, the later registration overrides the previous one. 
+ if workload[0] not in WORKLOAD_FUNC_REGISTRY: + register_workload_tensors(state["workload_key"], state["compute_dag"].tensors) + + state["target"], state["target_host"] = Target.canon_target_and_host( + state["target"], state["target_host"] + ) + self.__init_handle_by_constructor__( + _ffi_api.SearchTask, + state["compute_dag"], + state["workload_key"], + state["target"], + state["target"].host, + state["hardware_params"], + state["layout_rewrite_option"], + state["task_input_names"], + state["desc"], + ) + + +def create_task(func, args, target, target_host=None, hardware_params=None): + """THIS API IS DEPRECATED. + + Create a search task. + + Parameters + ---------- + func : Union[Function, str] + The function that returns the compute declaration Tensors. + Can be the a function or the function name. + args : Union[Tuple[Any, ...], List[Any]] + The args of the function. + target : Union[tvm.target.Target, str] + The target device of this search task. + target_host : Optional[Union[tvm.target.Target, str]] + The target host device of this search task. + hardware_params : Optional[HardwareParams] + Hardware parameters used in this search task. + + Returns + ------- + SearchTask: the created task + """ + raise ValueError( + 'The API "auto_scheduler.create_task" is deprecated.' + "See https://github.com/apache/tvm/pull/7028 for the upgrade guide" + ) + + +def auto_schedule(task, search_policy=None, tuning_options=TuningOptions()): + """THIS API IS DEPRECATED. + + Run auto scheduling search for a task. + + Parameters + ---------- + task : SearchTask + The SearchTask for the computation declaration. + search_policy : Optional[SearchPolicy] + The search policy to be used for schedule search. + tuning_options : Optional[TuningOptions] + Tuning and measurement options. + + Returns + ------- + A `te.Schedule` and the a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`. + """ + raise ValueError( + 'The API "auto_scheduler.create_task" is deprecated.' 
+ "See https://github.com/apache/tvm/pull/7028 for the upgrade guide." + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/task_scheduler.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/task_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..58457daad0b68db0bda75cf2285648b18c20efad --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/task_scheduler.py @@ -0,0 +1,658 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name + +""" The task scheduler that allocates the time resources when tuning multiple tasks together + +The details of the "gradient" strategy below can be found in the section 6 of this paper: +L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor +Programs for Deep Learning." (OSDI 2020). 
+""" +import os +import time +import math +import logging + +import numpy as np + +from .search_policy import SearchPolicy, SketchPolicy, PreloadMeasuredStates +from .cost_model import RandomModel, XGBModel +from .utils import array_mean +from .measure import ProgramMeasurer +from .measure_record import RecordReader +from . import _ffi_api + +logger = logging.getLogger("auto_scheduler") + + +def make_search_policies( + search_policy, + search_policy_params, + tasks, + num_measures_per_round, + verbose, + load_model_file=None, + load_log_file=None, + adaptive_training=False, +): + """Make a list of search policies for a list of search tasks. + It creates one policy per task. + + Parameters + ---------- + search_policy: Union[str, List[SearchPolicy]] + The name of search policy. + search_policy_params: Dict[str, Any]] + The parameters of the search policy. + tasks: List[SearchTask] + The list of all tasks + num_measures_per_round: int + The number of schedules to be measured at each search round. + This should be the same as `TuningOptions.num_measures_per_round` + verbose: int + The verbosity level. 0 for silent. + load_model_file: Optional[str] + Load pre-trained model from this file. If this is None, the cost model will + be trained from scratch. + load_log_file: Optional[str] + Load measurement records from this file. If it is not None, the status of the + task scheduler, search policies and cost models will be restored according to this file. + adaptive_training: bool = False + Option used by XGBModel to reduce the model training frequency when there're too + many logs. 
+ + Returns + ------- + policies: List[SearchPolicy] + The list of search policies + """ + if search_policy == "default": + search_policy = "sketch.xgb" + + if isinstance(search_policy, str): + policy_type, model_type = search_policy.split(".") + if model_type == "xgb": + cost_model = XGBModel( + num_warmup_sample=len(tasks) * num_measures_per_round, + model_file=load_model_file, + adaptive_training=adaptive_training, + ) + if load_model_file and os.path.isfile(load_model_file): + logger.info("TaskScheduler: Load pretrained model...") + cost_model.load(load_model_file) + elif load_log_file: + logger.info("TaskScheduler: Reload measured states and train the model...") + cost_model.update_from_file(load_log_file) + elif model_type == "random": + cost_model = RandomModel() + else: + raise ValueError("Invalid search policy: " + search_policy) + + if policy_type == "sketch": + if load_log_file: + # use the log file to restore the status of search policies. + init_search_callbacks = [PreloadMeasuredStates(load_log_file)] + else: + init_search_callbacks = None + search_policies = [ + SketchPolicy( + task, + cost_model, + params=search_policy_params, + verbose=verbose, + init_search_callbacks=init_search_callbacks, + ) + for task in tasks + ] + else: + raise ValueError("Invalid search policy: " + search_policy) + else: + # check type + assert isinstance(search_policy, (tuple, list)) + for item in search_policy: + assert isinstance(item, SearchPolicy) + search_policies = search_policy + + return search_policies + + +def derive_similarity_tag(dag, log_base=1.618): + """Derive the tag for similarity check from one computational DAG. + The DAGs with the same tag are considered as similar tasks. + + The tag format is _ ... . + + If the tag is "", then the task is not considered to be similar to any other tasks. 
+ + Parameters + ---------- + dag: ComputeDAG + The input computational DAG + log_base: float = 1.618 + The base of log to normalize FLOPS + + Returns + ------- + tag: str + The tag of this computational DAG. + """ + ret = "" + for op in dag.ops: + tag = op.attrs.get("auto_scheduler_task_scheduler_tag", None) + if tag: + ret += op.attrs["auto_scheduler_task_scheduler_tag"] + "_" + if ret: + ret += "%d" % int(math.log(dag.flop_ct + 1, log_base)) + return ret + + +class TaskScheduler: + """ + Allocate the time resources when tuning multiple tasks together. + This implements two strategies: "round-robin" and "gradient". + + Parameters + ---------- + tasks: List[SearchTask] + All tasks to tune + task_weights: Optional[List[float]] + The weights of tasks. + If provided, the task scheduler will set the objective function to + sum(weight[t] * latency[t]), where weight[t] is the weight of a task + and the lantecy[t] is the lantecy of the task. + If not provided, the task scheduer will assign equal weights to all + tasks (i.e., the objective function is sum(latency[t])). + objective_func: Optional[Callable[List[float] -> float]] + The objective function to be minimized. + The objective function accepts the current latencies of all tasks and returns the + objective. + If not provided, the objective is the weighted sum of the latencies of all tasks. + strategy: str = "gradient" + The scheduling strategy. + "round-robin": Tune tasks in round robin order. + "gradient" : Tune tasks with gradient descent. + load_model_file: Optional[str] + Load pre-trained model from this file. If this is None, the cost model will + be trained from scratch. + load_log_file: Optional[str] + Load measurement records from this file. If it is not None, the status of the + task scheduler, search policies and cost models will be restored according to this file. + verbose: int = 1 + The level of verbosity. 0 means silent. 
+ alpha: float = 0.2 + The parameter used for 'gradient' strategy + beta: float = 2 + The parameter used for 'gradient' strategy + backward_window_size: int = 3 + The parameter used for 'gradient' strategy + callbacks: Optional[List[TaskSchedulerCallback]] + The task scheduler callbacks that will be called before and after tuning a task. + If None, PrintTableInfo and LogEstimatedLatency callback will be used. + """ + + def __init__( + self, + tasks, + task_weights=None, + objective_func=None, + strategy="gradient", + load_model_file: str = None, + load_log_file: str = None, + alpha: float = 0.2, + beta: float = 2, + gamma: float = 0.5, + backward_window_size: int = 3, + callbacks=None, + ): + self.tasks = tasks + if objective_func: # use custom objective function + self.objective_func = objective_func + else: # use weighted sum + if task_weights: + self.objective_func = lambda costs: sum(c * w for c, w in zip(costs, task_weights)) + else: + self.objective_func = sum + + self.strategy = strategy + self.load_log_file = load_log_file + self.load_model_file = load_model_file + self.alpha = alpha + self.beta = beta + self.gamma = gamma + self.backward_window_size = backward_window_size + self.callbacks = ( + callbacks + if callbacks is not None + else [PrintTableInfo(), LogEstimatedLatency("total_latency.tsv")] + ) + + assert len(self.tasks) != 0, "No tasks" + assert self.strategy in ["round-robin", "gradient"] + + # task_cts[i] saves how many times task i is tuned + self.task_cts = [0 for _ in range(len(self.tasks))] + + # task_best_cts[i] saves the round task i found the best latency + self.task_best_cts = [0 for _ in range(len(self.tasks))] + + # task_costs_history[i] saves the latency history of task i + self.task_costs_history = [[] for _ in range(len(self.tasks))] + + # best_costs[i] saves the best latency of task i + self.best_costs = 1e10 * np.ones(len(self.tasks)) + self.cur_score = self._compute_score(self.best_costs) + + self.tune_option = self.measurer = 
self.search_policies = None + self.ct = self.best_ct = self.best_score = self.tic = None + self.num_measures_per_round = None + self.dead_tasks = set() + + # Build similarity groups + self.task_tags = [] # task_id -> tag + self.tag_to_group_id = {} # tag -> group_id + self.group_task_ids = [] # group_id -> all task ids in this group + self.flop_cts = [] # task_id -> the number of floating ops + for i, task in enumerate(self.tasks): + tag = derive_similarity_tag(task.compute_dag) + self.task_tags.append(tag) + self.flop_cts.append(task.compute_dag.flop_ct) + if not tag: + continue + + if tag not in self.tag_to_group_id: + self.tag_to_group_id[tag] = len(self.tag_to_group_id) + self.group_task_ids.append([]) + self.group_task_ids[self.tag_to_group_id[tag]].append(i) + + def tune( + self, + tune_option, + search_policy="default", + search_policy_params=None, + adaptive_training=False, + per_task_early_stopping=None, + ): + """Tune a batch of tasks together. + + Parameters + ---------- + tune_option: TuningOptions + The tuning options applied to all tasks. + search_policy: : Union[str, List[SearchPolicy]] = "default" + The list of search policies. + If it is str, + "default" for the default policy (SketchPolicy + XGBModel), + "sketch.xgb" for SketchPolicy + XGBModel, + "sketch.random" for SketchPolicy + RandomModel. + search_policy_params : Optional[Dict[str, Any]] + The parameters of the search policy + adaptive_training : bool = False + Option used by XGBModel to reduce the model training frequency when there're + too many logs. + per_task_early_stopping : Optional[int] + Stop tuning a task early if getting no improvement after n measurements. 
+ """ + # init members + self.tune_option = tune_option + self.early_stopping_all = ( + 1e20 if tune_option.early_stopping < 0 else tune_option.early_stopping + ) + self.early_stopping_task = ( + 1e20 if per_task_early_stopping is None else per_task_early_stopping + ) + + self.measurer = ProgramMeasurer( + tune_option.builder, + tune_option.runner, + tune_option.measure_callbacks, + tune_option.verbose, + ) + self.ct = self.best_ct = 0 + self.tic = time.time() + + # reset num_measures_per_round to make sure every task is tuned at least once + self.num_measures_per_round = min( + tune_option.num_measures_per_round, tune_option.num_measure_trials // len(self.tasks) + ) + if self.num_measures_per_round <= 0: + raise ValueError( + "num_measure_trials is too small. Please set it to a higher value." + f"It should be at least {len(self.tasks)} for this model." + ) + + # restore the status of the task scheduler from a log file + if self.load_log_file: + self._restore_status(self.load_log_file, self.num_measures_per_round) + + # make one search policy for one task + self.search_policies = make_search_policies( + search_policy, + search_policy_params, + self.tasks, + self.num_measures_per_round, + tune_option.verbose, + self.load_model_file, + self.load_log_file, + adaptive_training, + ) + + # do a round robin first to warm up + for idx in range(len(self.tasks)): + # skip warming up this task if it has been tuned before (restored from the log file) + if not self.task_cts[idx]: + self._tune_task(idx) + self.best_ct = self.ct + self.best_score = self.cur_score + + # put task without schedule on warm up to dead state + for task_idx, cost in enumerate(self.best_costs): + if cost == 1e10: + self.dead_tasks.add(task_idx) + + # use the specific strategy to choose workload to tune + task_idx = -1 + while self.ct < tune_option.num_measure_trials and len(self.dead_tasks) < len(self.tasks): + if self.strategy == "round-robin": + task_idx = (task_idx + 1) % len(self.tasks) + while 
task_idx in self.dead_tasks: + task_idx = (task_idx + 1) % len(self.tasks) + elif self.strategy == "gradient": + gradients = [] + + for i in range(len(self.tasks)): + if i in self.dead_tasks: + gradients.append(0) + continue + + # compute gradient from chain rule : (delta f / delta g_i) + delta = 1e-4 + new_costs = list(self.best_costs) + new_costs[i] -= delta + chain_grad = ( + self._compute_score(self.best_costs) - self._compute_score(new_costs) + ) / delta + + # compute (g_i(t_i) - g(t_i - \Delta t)) / (\Delta t) + if ( + self.task_cts[i] - 1 < len(self.task_costs_history[i]) + and self.task_cts[i] - 1 - self.backward_window_size >= 0 + ): + backward_grad = ( + self.task_costs_history[i][self.task_cts[i] - 1] + - self.task_costs_history[i][ + self.task_cts[i] - 1 - self.backward_window_size + ] + ) / self.backward_window_size + else: + backward_grad = 0 + + # compute (g_i(t_i + \Delta t) - g(t_i)) / (\Delta t) + g_next_1 = self.best_costs[i] - (self.best_costs[i] / self.task_cts[i]) + + g_next_2 = self.beta * 1e30 + group_id = self.tag_to_group_id.get(self.task_tags[i], None) + if group_id is not None and len(self.group_task_ids[group_id]) > 1: + best_flops = max( + [ + self.flop_cts[j] / self.best_costs[j] + for j in self.group_task_ids[group_id] + ] + ) + g_next_2 = self.beta * self.flop_cts[i] / best_flops + + g_next = min(g_next_1, g_next_2) + forward_grad = g_next - self.best_costs[i] + + # combine all grads + grad = chain_grad * ( + self.alpha * backward_grad + (1 - self.alpha) * forward_grad + ) + assert grad <= 0 + gradients.append(grad) + + if max(gradients) == min(gradients): + task_idx = np.random.choice(len(gradients)) + else: + task_idx = np.argmin(gradients) + else: + raise ValueError("Invalid strategy: " + self.strategy) + + self._tune_task(task_idx) + self._adjust_similarity_group(task_idx) + + if self.cur_score < self.best_score: + self.best_score = self.cur_score + self.best_ct = self.ct + elif self.ct - self.best_ct >= self.early_stopping_all 
and all( + cost < 1e9 for cost in self.best_costs + ): + if self.tune_option.verbose >= 1: + print( + "Stop early since no performance improvement in the last " + + str(self.early_stopping_all) + + " measurement trials." + ) + break + + def _tune_task(self, task_idx): + """Tune the select task for one round""" + + # Run pre-tune callbacks + for callback in self.callbacks: + callback.pre_tune(self, task_idx) + + measure_inputs, measure_results = self.search_policies[task_idx].continue_search_one_round( + self.num_measures_per_round, self.measurer + ) + + self.task_cts[task_idx] += 1 + + for res in measure_results: + cost = array_mean(res.costs) + if cost < self.best_costs[task_idx]: + self.task_best_cts[task_idx] = self.task_cts[task_idx] + self.best_costs[task_idx] = cost + + # Stop tuning this task in the rest of the process if its search space has been + # fully explored or it has no improvement for a long while. + no_change_trials = ( + self.task_cts[task_idx] - self.task_best_cts[task_idx] + ) * self.num_measures_per_round + if len(measure_inputs) == 0 or no_change_trials > self.early_stopping_task: + self.dead_tasks.add(task_idx) + + self.task_costs_history[task_idx].append(self.best_costs[task_idx]) + + self.ct += len(measure_inputs) + self.cur_score = self._compute_score(self.best_costs) + + # Run post-tune callbacks + for callback in self.callbacks: + callback.post_tune(self, task_idx) + + def _compute_score(self, costs): + """compute the objective function""" + # Make sure to return float. 
+ score = self.objective_func(costs) + return score.value if hasattr(score, "value") else score + + def _adjust_similarity_group(self, task_idx): + """adjust the similarity group for the selected task""" + group_id = self.tag_to_group_id.get(self.task_tags[task_idx], None) + if group_id is None or len(self.group_task_ids[group_id]) <= 1: + return + + group_ids = self.group_task_ids[group_id] + best_group_flops = max([self.flop_cts[j] / self.best_costs[j] for j in group_ids]) + cur_flops = self.flop_cts[task_idx] / self.best_costs[task_idx] + + # if we tune a task for many times but it still cannot achieve + # a similar speed to the fastest one in its group, this means this task + # is actually not similar to other tasks in its group. + # So we will remove it from its original group. + if cur_flops < best_group_flops / self.beta and self.task_cts[task_idx] > 5 + max( + self.task_cts[j] for j in group_ids if j != task_idx + ): + self.task_tags[task_idx] = None + group_ids.remove(task_idx) + + def _restore_status(self, log_file, num_measures_per_round): + """restore task_cts and best_costs from a log file""" + str_target = str(self.tasks[0].target) + workload_key_to_task_id = {t.workload_key: i for i, t in enumerate(self.tasks)} + total_ct = -1 + + for total_ct, (inp, res) in enumerate(RecordReader(log_file)): + if str(inp.task.target) != str_target: + continue + task_idx = workload_key_to_task_id.get(inp.task.workload_key, None) + if task_idx is None: + continue + + self.task_cts[task_idx] += 1 + + if res.error_no == 0: + cost = array_mean(res.costs) + if cost < self.best_costs[task_idx]: + self.best_costs[task_idx] = cost + self.task_best_cts[task_idx] = self.task_cts[task_idx] + + for idx in range(len(self.tasks)): + if self.task_cts[idx] - self.task_best_cts[idx] > self.early_stopping_task: + self.dead_tasks.add(idx) + + # The computation of taks_cts is just an estimation. 
+ # The estimation may not be accurate if the log file is changed externally or + # `num_measures_per_round` is different from the last tuning. + self.task_cts[idx] = int(self.task_cts[idx] / num_measures_per_round + 0.5) + self.task_best_cts[idx] = int(self.task_best_cts[idx] / num_measures_per_round + 0.5) + self.task_costs_history[idx].append(self.best_costs[idx]) + + self.cur_score = self._compute_score(self.best_costs) + + logger.info("TaskScheduler: Loaded %d measurement records from %s", total_ct + 1, log_file) + + +class TaskSchedulerCallback: + """The base class of task scheduler callback functions.""" + + def pre_tune(self, task_scheduler, task_id): + """The callback before tuning each task. + + Parameters + ---------- + task_scheduler: TaskScheduler + The task scheduler. + task_id: int + The task ID going to be tuned. + """ + # Do nothing by default + + def post_tune(self, task_scheduler, task_id): + """The callback after tuning each task. + + Parameters + ---------- + task_scheduler: TaskScheduler + The task scheduler. + task_id: int + The task ID be tuned. 
+ """ + # Do nothing by default + + +class PrintTableInfo(TaskSchedulerCallback): + """The callback that prints a table of current progress.""" + + def pre_tune(self, task_scheduler, task_id): + if task_scheduler.tune_option.verbose < 1: + return + + _ffi_api.PrintTitle("Task Scheduler") + print( + "| ID " + "| Task Description " + "| Latency (ms) | Speed (GFLOPS) | Trials |" + ) + print( + "----------------------------------------------------------------" + "-------------------------------------------------" + ) + + # content + for i in range(len(task_scheduler.tasks)): + id_str = f"{i}" + latency_str = ( + "%.3f" % (1e3 * task_scheduler.best_costs[i]) + if task_scheduler.best_costs[i] < 1e9 + else "-" + ) + task_desc = task_scheduler.tasks[i].desc + speed_str = ( + "%.2f" + % (task_scheduler.tasks[i].compute_dag.flop_ct / task_scheduler.best_costs[i] / 1e9) + if task_scheduler.best_costs[i] < 1e9 + else "-" + ) + trials_str = "%d" % (task_scheduler.task_cts[i] * task_scheduler.num_measures_per_round) + print( + "| %4s | %61s | %12s | % 14s | %6s |" + % (id_str, task_desc, latency_str, speed_str, trials_str) + ) + print( + "----------------------------------------------------------------" + "-------------------------------------------------" + ) + + # overall info + if all(cost < 1e9 for cost in task_scheduler.best_costs): + total_latency_str = "%.3f" % (task_scheduler.cur_score * 1e3) + else: + total_latency_str = "-" + print( + "Estimated total latency: %s ms\tTrials: %d\tUsed time : %.0f s\tNext ID: %d\t" + % (total_latency_str, task_scheduler.ct, time.time() - task_scheduler.tic, task_id) + ) + + +class LogEstimatedLatency(TaskSchedulerCallback): + """Log the estimated latency to the file after tuning a task. + + Parameters + ---------- + log_file: str + The log file path. 
+ """ + + def __init__(self, log_file): + if os.path.exists(log_file): # Remove existing log + os.remove(log_file) + + self.log_file = log_file + + def post_tune(self, task_scheduler, task_id): + if all(cost < 1e9 for cost in task_scheduler.best_costs): + total_latency_str = "%.3f" % (task_scheduler.cur_score * 1e3) + else: + total_latency_str = "N/A" + + with open(self.log_file, "a") as filep: + filep.write( + "ElapsedTime(s)\t%.0f\tEstimatedLatency(ms)\t%s\tTrials\t%d\n" + % (time.time() - task_scheduler.tic, total_latency_str, task_scheduler.ct) + ) + filep.flush() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2bbcf8317de301120189589dd53f6174e7b6668e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=unused-import, redefined-builtin +"""Testing utilities in auto scheduler.""" + +# NOTE: Do not import any module here by default diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5773b956bcf3d4f5df3886b28cd42839a4256f4a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_onnx.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_onnx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f642b49d973abd1a3ba14c70bc83f8cbfad4f82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_onnx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_relay.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_relay.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91bb5dc33c57a3bda0159aec4bfa0456db9d14b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_relay.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_te.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_te.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afb688fc3f3907859a346bccc3626c411dd34b87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/__pycache__/tune_te.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_onnx.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..a3299c05bb821d2f7a2681f8b9f96eef4ba4d6fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_onnx.py @@ -0,0 +1,234 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=missing-docstring +from distutils.util import strtobool +import argparse +import json +import os +import onnx # type: ignore + +import tvm +from tvm import auto_scheduler +from tvm import meta_schedule as ms +from tvm import relay +from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc +from tvm.meta_schedule.testing.tune_utils import generate_input_data, create_timer +from tvm.meta_schedule.utils import cpu_count +from tvm.relay.frontend import from_onnx +from tvm.support import describe + + +def _parse_args(): + args = argparse.ArgumentParser() + args.add_argument( + "--model-name", + type=str, + required=True, + ) + args.add_argument( + "--onnx-path", + type=str, + required=True, + ) + args.add_argument( + "--input-shape", + type=str, + required=True, + help='example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 8]}]', + ) + args.add_argument( + "--target", + type=str, + required=True, + ) + args.add_argument( + "--num-trials", + type=int, + required=True, + ) + args.add_argument( + "--rpc-host", + type=str, + required=True, + ) + args.add_argument( + "--rpc-port", + type=int, + required=True, + ) + args.add_argument( + "--rpc-key", + type=str, + required=True, + ) + args.add_argument( + "--work-dir", + type=str, + required=True, + ) + args.add_argument( + "--number", + type=int, + default=3, + ) + args.add_argument( + "--repeat", + type=int, + default=1, + ) + args.add_argument( + "--min-repeat-ms", + type=int, + default=100, + ) + args.add_argument( + "--adaptive-training", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + default=True, + ) + args.add_argument( + "--cpu-flush", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + required=True, + ) + args.add_argument( + "--backend", + type=str, + choices=["graph", "vm"], + help="example: graph / vm", + required=True, + ) + parsed = args.parse_args() + parsed.target = tvm.target.Target(parsed.target) + 
parsed.input_shape = json.loads(parsed.input_shape) + parsed.rpc_config = ms.runner.RPCConfig( + tracker_host=parsed.rpc_host, + tracker_port=parsed.rpc_port, + tracker_key=parsed.rpc_key, + session_timeout_sec=600, + ) + return parsed + + +ARGS = _parse_args() + + +def main(): + log_file = os.path.join(ARGS.work_dir, f"{ARGS.model_name}.json") + + runner = auto_scheduler.RPCRunner( + key=ARGS.rpc_key, + host=ARGS.rpc_host, + port=ARGS.rpc_port, + n_parallel=cpu_count(logical=True), + number=ARGS.number, + repeat=ARGS.repeat, + min_repeat_ms=ARGS.min_repeat_ms, + enable_cpu_cache_flush=ARGS.cpu_flush, + timeout=ARGS.rpc_config.session_timeout_sec, + ) + + if ARGS.target.kind.name == "llvm": + hardware_params = auto_scheduler.HardwareParams( + num_cores=int(ARGS.target.attrs["num-cores"]), + target=ARGS.target, + ) + elif ARGS.target.kind.name == "cuda": + hardware_params = auto_scheduler.HardwareParams( + num_cores=-1, + vector_unit_bytes=16, + cache_line_bytes=64, + max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]), + max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]), + # The value `max_local_memory_per_block` is not used in AutoScheduler, + # but is required by the API. 
+ max_local_memory_per_block=12345678, + max_vthread_extent=8, + warp_size=32, + ) + else: + raise NotImplementedError(f"Unsupported target {ARGS.target}") + + describe() + print(f"Workload: {ARGS.model_name}") + onnx_model = onnx.load(ARGS.onnx_path) + shape_dict = {} + for item in ARGS.input_shape: + print(f" input_name : {item['name']}") + print(f" input_shape: {item['shape']}") + print(f" input_dtype: {item['dtype']}") + shape_dict[item["name"]] = item["shape"] + mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True) + input_data = { + item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in ARGS.input_shape + } + + with ms.Profiler() as profiler: + tasks, task_weights = auto_scheduler.extract_tasks( + mod["main"], + params, + target=ARGS.target, + hardware_params=hardware_params, + ) + for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)): + print( + f"==== Task {idx}: {task.desc} " + f"(weight {task_weight} key: {task.workload_key}) =====" + ) + print(task.compute_dag) + + if ARGS.num_trials > 0: + tuner = auto_scheduler.TaskScheduler(tasks, task_weights) + tuner.tune( + auto_scheduler.TuningOptions( + num_measure_trials=ARGS.num_trials, + runner=runner, + measure_callbacks=[ + auto_scheduler.RecordToFile(log_file), + ], + ), + adaptive_training=ARGS.adaptive_training, + ) + + relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend] + with auto_scheduler.ApplyHistoryBest(log_file): + with tvm.transform.PassContext( + opt_level=3, + config={"relay.backend.use_auto_scheduler": True}, + ): + lib = relay_build( + mod, + target=ARGS.target, + params=params, + ) + print("Tuning Time:") + print(profiler.table()) + + run_module_via_rpc( + rpc_config=ARGS.rpc_config, + lib=lib, + dev_type=ARGS.target.kind.name, + args=input_data, + continuation=create_timer(ARGS.backend), + backend=ARGS.backend, + ) + + +if __name__ == "__main__": + main() diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_relay.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_relay.py new file mode 100644 index 0000000000000000000000000000000000000000..9773fbbc65ad7673fff640381f9af9a96159f6e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_relay.py @@ -0,0 +1,249 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=missing-docstring +import argparse +import json +import os +from distutils.util import strtobool + +import tvm +from tvm import auto_scheduler +from tvm import meta_schedule as ms +from tvm import relay +from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc +from tvm.meta_schedule.testing.relay_workload import get_network +from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data +from tvm.meta_schedule.utils import cpu_count +from tvm.support import describe + + +def _parse_args(): + args = argparse.ArgumentParser() + args.add_argument( + "--workload", + type=str, + required=True, + ) + args.add_argument( + "--input-shape", + type=str, + required=True, + ) + args.add_argument( + "--target", + type=str, + required=True, + ) + args.add_argument( + "--num-trials", + type=int, + required=True, + ) + args.add_argument( + "--rpc-host", + type=str, + required=True, + ) + args.add_argument( + "--rpc-port", + type=int, + required=True, + ) + args.add_argument( + "--rpc-key", + type=str, + required=True, + ) + args.add_argument( + "--work-dir", + type=str, + required=True, + ) + args.add_argument( + "--layout", + type=str, + default=None, + ) + args.add_argument( + "--cache-dir", + type=str, + default=None, + ) + args.add_argument( + "--number", + type=int, + default=3, + ) + args.add_argument( + "--repeat", + type=int, + default=1, + ) + args.add_argument( + "--min-repeat-ms", + type=int, + default=100, + ) + args.add_argument( + "--adaptive-training", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + default=True, + ) + args.add_argument( + "--cpu-flush", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + required=True, + ) + args.add_argument( + "--backend", + type=str, + choices=["graph", "vm"], + help="example: graph / vm", + required=True, + ) + parsed = args.parse_args() + parsed.target = tvm.target.Target(parsed.target) + parsed.input_shape = 
json.loads(parsed.input_shape) + parsed.rpc_config = ms.runner.RPCConfig( + tracker_host=parsed.rpc_host, + tracker_port=parsed.rpc_port, + tracker_key=parsed.rpc_key, + session_timeout_sec=600, + ) + return parsed + + +ARGS = _parse_args() + + +def main(): + log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json") + + runner = auto_scheduler.RPCRunner( + key=ARGS.rpc_key, + host=ARGS.rpc_host, + port=ARGS.rpc_port, + n_parallel=cpu_count(logical=True), + number=ARGS.number, + repeat=ARGS.repeat, + min_repeat_ms=ARGS.min_repeat_ms, + enable_cpu_cache_flush=ARGS.cpu_flush, + timeout=ARGS.rpc_config.session_timeout_sec, + ) + + if ARGS.target.kind.name == "llvm": + hardware_params = auto_scheduler.HardwareParams( + num_cores=int(ARGS.target.attrs["num-cores"]), + target=ARGS.target, + ) + elif ARGS.target.kind.name == "cuda": + hardware_params = auto_scheduler.HardwareParams( + num_cores=-1, + vector_unit_bytes=16, + cache_line_bytes=64, + max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]), + max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]), + # The value `max_local_memory_per_block` is not used in AutoScheduler, + # but is required by the API. 
+ max_local_memory_per_block=12345678, + max_vthread_extent=8, + warp_size=32, + ) + else: + raise NotImplementedError(f"Unsupported target {ARGS.target}") + + describe() + print(f"Workload: {ARGS.workload}") + mod, params, (input_name, input_shape, input_dtype) = get_network( + ARGS.workload, + ARGS.input_shape, + layout=ARGS.layout, + cache_dir=ARGS.cache_dir, + ) + input_info = [ + { + "name": input_name, + "shape": input_shape, + "dtype": input_dtype, + }, + ] + input_data = { + item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in input_info + } + for item in input_info: + print(f" input_name : {item['name']}") + print(f" input_shape: {item['shape']}") + print(f" input_dtype: {item['dtype']}") + + with ms.Profiler() as profiler: + with ms.Profiler.timeit("TaskExtraction"): + tasks, task_weights = auto_scheduler.extract_tasks( + mod["main"], + params, + target=ARGS.target, + hardware_params=hardware_params, + ) + for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)): + print( + f"==== Task {idx}: {task.desc} " + f"(weight {task_weight} key: {task.workload_key}) =====" + ) + print(task.compute_dag) + + with ms.Profiler.timeit("Tuning"): + if ARGS.num_trials > 0: + tuner = auto_scheduler.TaskScheduler(tasks, task_weights) + tuner.tune( + auto_scheduler.TuningOptions( + num_measure_trials=ARGS.num_trials, + runner=runner, + measure_callbacks=[ + auto_scheduler.RecordToFile(log_file), + ], + ), + adaptive_training=ARGS.adaptive_training, + ) + + relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend] + with ms.Profiler.timeit("PostTuningCompilation"): + with auto_scheduler.ApplyHistoryBest(log_file): + with tvm.transform.PassContext( + opt_level=3, + config={"relay.backend.use_auto_scheduler": True}, + ): + lib = relay_build( + mod, + target=ARGS.target, + params=params, + ) + print("Tuning Time:") + print(profiler.table()) + + run_module_via_rpc( + rpc_config=ARGS.rpc_config, + lib=lib, + 
dev_type=ARGS.target.kind.name, + args=input_data, + continuation=create_timer(ARGS.backend), + backend=ARGS.backend, + ) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_te.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_te.py new file mode 100644 index 0000000000000000000000000000000000000000..da3584512dd020dc2e53619a536422d1caecccc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/testing/tune_te.py @@ -0,0 +1,186 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=missing-docstring +from distutils.util import strtobool +import argparse +import os + +import tvm +from tvm import auto_scheduler +from tvm import meta_schedule as ms +from tvm.meta_schedule.testing.te_workload import CONFIGS +from tvm.meta_schedule.utils import cpu_count +from tvm.support import describe + + +def _parse_args(): + args = argparse.ArgumentParser() + args.add_argument( + "--workload", + type=str, + required=True, + ) + args.add_argument( + "--target", + type=str, + required=True, + ) + args.add_argument( + "--num-trials", + type=int, + required=True, + ) + args.add_argument( + "--rpc-host", + type=str, + required=True, + ) + args.add_argument( + "--rpc-port", + type=int, + required=True, + ) + args.add_argument( + "--rpc-key", + type=str, + required=True, + ) + args.add_argument( + "--work-dir", + type=str, + required=True, + ) + args.add_argument( + "--number", + type=int, + default=3, + ) + args.add_argument( + "--repeat", + type=int, + default=1, + ) + args.add_argument( + "--min-repeat-ms", + type=int, + default=100, + ) + args.add_argument( + "--adaptive-training", + type=lambda x: bool(strtobool(x)), + required=False, + help="example: True / False", + default=True, + ) + args.add_argument( + "--cpu-flush", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + required=True, + ) + parsed = args.parse_args() + parsed.target = tvm.target.Target(parsed.target) + parsed.rpc_config = ms.runner.RPCConfig( + tracker_host=parsed.rpc_host, + tracker_port=parsed.rpc_port, + tracker_key=parsed.rpc_key, + session_timeout_sec=60, + ) + return parsed + + +ARGS = _parse_args() + + +def main(): + log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json") + + runner = auto_scheduler.RPCRunner( + key=ARGS.rpc_key, + host=ARGS.rpc_host, + port=ARGS.rpc_port, + n_parallel=cpu_count(logical=True), + number=ARGS.number, + repeat=ARGS.repeat, + min_repeat_ms=ARGS.min_repeat_ms, + enable_cpu_cache_flush=ARGS.cpu_flush, + 
timeout=ARGS.rpc_config.session_timeout_sec, + ) + + if ARGS.target.kind.name == "llvm": + hardware_params = auto_scheduler.HardwareParams( + num_cores=int(ARGS.target.attrs["num-cores"]), + target=ARGS.target, + ) + elif ARGS.target.kind.name == "cuda": + hardware_params = auto_scheduler.HardwareParams( + num_cores=-1, + vector_unit_bytes=16, + cache_line_bytes=64, + max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]), + max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]), + # The value `max_local_memory_per_block` is not used in AutoScheduler, + # but is required by the API. + max_local_memory_per_block=12345678, + max_vthread_extent=8, + warp_size=32, + ) + else: + raise NotImplementedError(f"Unsupported target {ARGS.target}") + + describe() + print(f"Workload: {ARGS.workload}") + with ms.Profiler() as profiler: + # Same as MetaSchedule Tune TE + # Does not count ApplyHistoryBest time + + workload_func, params = CONFIGS[ARGS.workload] + params = params[0] # type: ignore + workload_func = auto_scheduler.register_workload(workload_func) + + task = auto_scheduler.SearchTask( + func=workload_func, + args=params, + target=ARGS.target, + hardware_params=hardware_params, + ) + # Inspect the computational graph + print("Computational DAG:") + print(task.compute_dag) + tune_option = auto_scheduler.TuningOptions( + num_measure_trials=ARGS.num_trials, + measure_callbacks=[auto_scheduler.RecordToFile(log_file)], + verbose=2, + runner=runner, + ) + if ARGS.num_trials > 0: + print("Running AutoTuning:") + task.tune(tune_option, adaptive_training=ARGS.adaptive_training) + + print("Tuning Time:") + print(profiler.table()) + + print("History Best:") + print(task.print_best(log_file)) + + sch, args = task.apply_best(log_file) + print("Lowered TIR:") + print(tvm.lower(sch, args, simple_mode=True)) + + +if __name__ == "__main__": + main() diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/utils.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4d05fc4856c8485dad8b8c301ce2954472ed0d85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/utils.py @@ -0,0 +1,410 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name + +""" Common utilities for auto_scheduler. """ + +from typing import Hashable +import json +import signal +import threading +import traceback +import os + +import numpy as np + +try: + import psutil +except ImportError: + psutil = None + +import tvm +from tvm import rpc +from tvm.tir import expr +from tvm.tir.transform import Simplify +from tvm.ir.transform import Sequential +from ..te import Tensor, placeholder + + +def decode_workload_key(workload_key): + """Decode the workload key from a string to the name and arguments. The wokrload key + is expected to be a list of "[func_name/hash, args ...]" in a JSON string. If not, + then simply return the workload key as the name without arguments. 
+ + Parameters + ---------- + workload_key: str + The workload key in string. Format: "[func_name/hash, args ...]". + + Returns + ------- + name: str + The workload function name or the DAG hash. + args: Optional[Tuple[Any, ...]] + The flatten arguments in a tuple, or None if the workload key format is not decodeable. + """ + + def flatten_list(inp): + ret = [] + for elt in inp: + if isinstance(elt, list): + ret += flatten_list(elt) + else: + ret.append(elt) + return ret + + try: + key_list = json.loads(workload_key) + if isinstance(key_list, list) and len(key_list) >= 1: + return key_list[0], tuple(flatten_list(key_list[1:])) + except json.decoder.JSONDecodeError: + pass + return workload_key, None + + +def calc_workload_dis_factor(target_workload_pair, workload_pair): + """Calculate the distance factor of the workload to the target workload. + If two workloads are not compatible at all (i.e., different compute DAG or function), + then the distance factor is "inf". Otherwise, we calculate the factor by traversing + the workload arguments, which are the arguments of the compute function, + or the output shapes for the ComputeDAG. The factor is calculated by the following rules: + + 1. For non-zero integer values: `product(target_arg / candidate_arg)`. + 2. For non-integer or zero values: "inf" if not equal else 1. + + As a result, factor=1 is the optimal when two workloads are identical. + + Parameters + ---------- + target_workload_pair: Tuple[str, Optional[Tuple[Any, ...]]] + The target workload pair: (hash, argument tuple). + + workload_pair: Tuple[str, Optional[Tuple[Any, ...]]] + The candidate workload pair: (hash, argument tuple). + + Returns + ------- + dis_f: float + The distance factor. + """ + target_key, target_args = target_workload_pair + target_args = target_args if target_args is not None else [] + key, args = workload_pair + args = args if args is not None else [] + + # Not even the same func/DAG. 
+ if key != target_key or len(target_args) != len(args): + return float("inf") + + dis_f = 1 + for target_arg, arg in zip(target_args, args): + if isinstance(target_arg, int): + if target_arg == 0 or arg == 0: + if target_arg != arg: + return float("inf") + elif target_arg % arg != 0: + return float("inf") + else: + dis_f *= target_arg / arg + elif target_arg != arg: + return float("inf") + return dis_f + + +def get_func_name(func): + """Get name of a function. + + Parameters + ---------- + func: Function + The input function. + + Returns + ------- + name: str + The function name. + """ + return func.func_name if hasattr(func, "func_name") else func.__qualname__ + + +def get_const_int(exp): + """Verifies expr is integer and get the constant value. + + Parameters + ---------- + exp : Union[tvm.tir.expr, int] + The input expression. + + Returns + ------- + out_value : int + The output. + """ + if isinstance(exp, int): + return exp + if not isinstance(exp, expr.IntImm): + opt = Sequential([Simplify()]) + exp = opt(exp) + if not isinstance(exp, expr.IntImm): + raise ValueError("Expect value to be constant int") + return exp.value + + +def get_const_tuple(in_tuple): + """Verifies input tuple is IntImm, returns tuple of int. + + Parameters + ---------- + in_tuple : Tuple[tvm.tir.expr] + The input. + + Returns + ------- + out_tuple : Tuple[Union[int,tvm.tir.Var,tvm.tir.Any]] + The output tuple of int. The dynamic shape variables (Var or Any) will be preserved. + """ + ret = [] + for elem in in_tuple: + if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)): + ret.append(elem) + else: + ret.append(get_const_int(elem)) + return tuple(ret) + + +def list_to_tuple(x): + """Convert a list to a tuple recursively.""" + assert isinstance(x, list) + return tuple(list_to_tuple(y) if isinstance(y, list) else y for y in x) + + +def serialize_args(args): + """ + Serialize arguments of a function to a hashable and jsonable tuple. 
+ Currently this is mainly used for tvm.tensor.Tensor + """ + ret = [] + if args is None: + return tuple(ret) + + for t in args: + if isinstance(t, Tensor): + t = ("TENSOR", get_const_tuple(t.shape), t.dtype) + elif isinstance(t, list): + t = list_to_tuple(t) + + assert isinstance(t, Hashable), str(t) + " is not hashable" + ret.append(t) + + return tuple(ret) + + +def deserialize_args(args): + """The inverse function of :code:`serialize_args`""" + ret = [] + for t in args: + if isinstance(t, (tuple, list)) and t[0] == "TENSOR": + ret.append(placeholder(shape=t[1], dtype=t[2])) + else: + ret.append(t) + return ret + + +def kill_child_processes(parent_pid, sig=signal.SIGTERM): + """kill all child processes recursively""" + if not psutil: + raise ImportError("psutil not found, try `pip install psutil` to fix this") + + try: + parent = psutil.Process(parent_pid) + except psutil.NoSuchProcess: + return + + try: + children = parent.children(recursive=True) + for process in children: + process.send_signal(sig) + except psutil.NoSuchProcess: + return + + +# The maximum length of traceback information +MAX_TRACEBACK_INFO_LEN = 512 + + +def make_traceback_info(): + """Get the error message from traceback.""" + info = str(traceback.format_exc()) + if len(info) > MAX_TRACEBACK_INFO_LEN: + info = ( + info[: MAX_TRACEBACK_INFO_LEN // 2] + "\n...\n" + info[-MAX_TRACEBACK_INFO_LEN // 2 :] + ) + return info + + +class PropagatingThread(threading.Thread): + """A thread that propagates the exception to the main thread""" + + def run(self): + self.exc = None + try: + self.ret = self._target(*self._args, **self._kwargs) + except Exception as e: # pylint: disable=broad-except + self.exc = e + + def join(self, timeout=None): + super(PropagatingThread, self).join(timeout) + if self.exc: + raise self.exc + return self.ret + + +def call_func_with_thread(func, args, kwargs): + """Call a function within a new thread""" + res = [] + + def wrapper(): + res.append(func(*args, **kwargs)) + + t = 
PropagatingThread(target=wrapper) + t.start() + t.join() + return res[0] + + +def call_func_with_timeout( + worker, timeout, func, args=(), kwargs=None +): # pylint: disable=unused-argument + """Call a function with timeout""" + worker.send(func, args, kwargs, timeout) + try: + res = worker.recv() + except Exception: # pylint: disable=broad-except + res = Exception(make_traceback_info()) + + return res + + +def request_remote(device_key, host=None, port=None, priority=1, timeout=60): + """Request a remote session. + + Parameters + ---------- + device_key : str + The device key of registered device in tracker. + host : Optional[str] + The host address of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_HOST". + port : Optional[int] + The port of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_PORT". + priority : int = 1 + The priority of this request, larger is more prior. + timeout : int = 60 + The timeout of this session in second. + + Returns + ------- + remote : RPCSession + The connected remote RPCSession. + """ + # connect to the tracker + host = host or os.environ["TVM_TRACKER_HOST"] + port = port or int(os.environ["TVM_TRACKER_PORT"]) + + tracker = rpc.connect_tracker(host, port) + remote = tracker.request(device_key, priority=priority, session_timeout=timeout) + return remote + + +def check_remote(device_key, host=None, port=None, priority=100, timeout=10): + """ + Check the availability of a remote device. + + Parameters + ---------- + device_key: str + device key of registered device in tracker. + host: Optional[str] + The host address of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_HOST". + port: Optional[int] + The port address of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_PORT". + priority: int = 100 + The priority of this request, larger is more prior. + timeout: int = 10 + The timeout of this check in seconds. 
+ + Returns + ------- + available: bool + True if can find available device. + """ + + def _check(): + request_remote(device_key, host, port, priority) + + t = threading.Thread(target=_check) + t.start() + t.join(timeout) + return not t.is_alive() + + +def array_mean(arr): + """Compute mean of the elments in a TVM Array + + Parameters + ---------- + arr: Array + A TVM Array + + Returns + ------- + mean: float + The mean of the elements in the array + """ + return sum(x.value for x in arr) / len(arr) + + +def to_str_round(x, decimal=6): + """Convert an object to str and round float numbers + + Parameters + ---------- + x: Union[str, list, int, float, np.ndarray] + The input object + decimal: int + The precision of decimal fraction + + Returns + ------- + ret: str + The string format of these objects + """ + if isinstance(x, str): + return x + if isinstance(x, (list, tuple, np.ndarray)): + return "[" + ", ".join([to_str_round(y, decimal=decimal) for y in x]) + "]" + if isinstance(x, dict): + return str({k: to_str_round(v) for k, v in x.items()}) + if isinstance(x, int): + return str(x) + if isinstance(x, (np.float32, np.float64, float)): + format_str = f"%.{decimal}f" + return format_str % x + raise ValueError(f"Invalid value: {str(x)}\ttype: {type(x)}") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/workload_registry.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/workload_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..62ba2245b002b5752ca91b6a2068b9c0a2f92460 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/auto_scheduler/workload_registry.py @@ -0,0 +1,280 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name + +""" +Workload registration and serialization. + +We use a json string to represent a workload (a computation graph). +The format of the string is `[func_name, [args...]]`. +The dag should be the return value of this `func_name(*args)`. + +Rationale: The workload is actually a compute dag defined by tvm dsl. But serializing compute dags +and matching them efficiently is not easy. Therefore, we use the above string to encode a compute +dag. +These strings are efficient for serialization/matching and won't be too long. +When we need the dag, we decode the string and call the function, which will return the dag. +""" + +import json +import logging +import pickle + +import tvm._ffi +from tvm.runtime._ffi_node_api import LoadJSON, SaveJSON + +from .utils import deserialize_args, get_func_name, serialize_args + +logger = logging.getLogger("auto_scheduler") + +# Global workload function and hash key registry +# It stores two types of workload: +# 1. User registered tasks. This type of workload is registered +# by the decorator "register_workload" +# 2. Extracted tasks from a relay program. This type of workload is +# registered by function "register_workload_tensors". 
+# +# For 1, the dictionary maps a function name to its function pointer +# For 2, the dictionary maps a hash key to a list of input/output tensors +WORKLOAD_FUNC_REGISTRY = {} + + +def register_workload(func_name, f=None, override=False): + """Register a function that generates a certain workload. + + The input function should take hashable and jsonable arguments + (int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of tvm.tensor.Tensor. + + Parameters + ---------- + func_name : Union[Function, str] + The generation function that returns the compute declaration Tensors or its function name. + f : Optional[Function] + The generation function to be registered. + override : boolean = False + Whether to override existing entry. + + Examples + -------- + .. code-block:: python + + @auto_scheduler.register_workload + def matmul(N, M, K): + A = te.placeholder((N, K), name='A') + B = te.placeholder((K, M), name='B') + k = te.reduce_axis((0, K), name='k') + C = te.compute((N, M), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C') + return [A, B, C] + """ + global WORKLOAD_FUNC_REGISTRY + + if callable(func_name): + f = func_name + func_name = get_func_name(f) + if not isinstance(func_name, str): + raise ValueError("expect string function name") + + def register(myf): + """internal register function""" + if func_name in WORKLOAD_FUNC_REGISTRY and not override: + raise RuntimeError(f"{func_name} has been registered already") + WORKLOAD_FUNC_REGISTRY[func_name] = myf + return myf + + if f: + return register(f) + return register + + +def register_workload_tensors(workload_key, tensors, override=True): + """Register a workload by provding input/output tensors. Since this function is used + when extracting/deserializing tasks, it expects duplicated registrations by default. + + Parameters + ---------- + workload_key: str + The wokrload key of the compute DAG in JSON string. 
+ tensors: List[Tensor] + The input/output tensors of a compute DAG + override : boolean = True + Whether to override existing entry. + + Returns + ------- + workload_key: str + The wokrload key of the compute DAG in JSON string. + """ + register_workload(workload_key, override=override)(tensors) + return workload_key + + +def make_workload_key(func, args): + """Make a workload key by function and arguments. + + Parameters + ---------- + func : Union[Function, str] + The function that returns the compute declaration Tensors. + Can be the a function or the function name. + args : Args + The args of the function. + + Returns + ------- + workload_key : str + The workload key of the function. + """ + global WORKLOAD_FUNC_REGISTRY + + if callable(func): + func_name = get_func_name(func) + elif isinstance(func, str): + func_name = func + else: + raise ValueError( + "Invalid function: " + + str(func) + + " . `make_workload_key` expects a callable function or its function name" + ) + + if not func_name in WORKLOAD_FUNC_REGISTRY: + raise ValueError( + f"{func} is not registered. " + f"Please register it with @auto_scheduler.register_workload" + ) + + args = serialize_args(args) + + return json.dumps((func_name,) + args) + + +@tvm._ffi.register_func("auto_scheduler.workload_key_to_tensors") +def workload_key_to_tensors(workload_key): + """Get the input/output tensors from the workload key. + + This method is usually used to create a ComputeDAG by workload key. + + Parameters + ---------- + workload_key : str + The input workload key in JSON string. The format is either (func_name, arguments...) + for compute functions, or (hash, shapes...) for ComputeDAG. + + Returns + ------- + tensors : List[Tensor] + The registered compute declaration Tensors. + """ + global WORKLOAD_FUNC_REGISTRY + + # We register ComputeDAG with both hash and argumetns, which are fixed in ComputeDAG, + # so we use an entire workload key to query the ComputeDAG. 
+ if workload_key in WORKLOAD_FUNC_REGISTRY: + return WORKLOAD_FUNC_REGISTRY[workload_key] + + # We register compute function with only the function name since + # it does not bind to specific arguments, so we use the function name to query + # the function and call the function with arguments to get the tensors. + workload = json.loads(workload_key) + name = workload[0] + value = WORKLOAD_FUNC_REGISTRY[name] + assert callable(value) + + args = deserialize_args(workload[1:]) + result = value(*args) + if isinstance(result, tuple): + result = list(result) + return result + + +def serialize_workload_registry_entry(workload_key): + """ + Serialize a workload registry entry. + + This is used when the start method of multiprocessing is spawn. + We need to serialize the entry and register it in the new processes. + + Parameters + ---------- + workload_key : str + The workload key + + Returns + ------- + data: Tuple + The serialized pickable data + """ + global WORKLOAD_FUNC_REGISTRY + + if workload_key in WORKLOAD_FUNC_REGISTRY: + sname = workload_key + else: + workload = json.loads(workload_key) + sname = workload[0] + + svalue = WORKLOAD_FUNC_REGISTRY[sname] + if not callable(svalue): + # pylint: disable=assignment-from-no-return + svalue = SaveJSON(svalue) + + return sname, svalue + + +def deserialize_workload_registry_entry(data): + """ + Deserialize a workload registry entry. + This should be used along with :code:`serialize_workload_registry_entry` + + Parameters + ---------- + data: Tuple + The return value of :code:`serialize_workload_registry_entry` + """ + global WORKLOAD_FUNC_REGISTRY + + name, value = data + if name not in WORKLOAD_FUNC_REGISTRY: + # pylint: disable=assignment-from-no-return + if not callable(value): + value = LoadJSON(value) + WORKLOAD_FUNC_REGISTRY[name] = value + + +def save_workload_func_registry(filename): + """Dump workload function registry to a pickle binary file. 
+ + Parameters + ---------- + filename : str + The filename to dump workload function registry to. + """ + global WORKLOAD_FUNC_REGISTRY + + pickle.dump(WORKLOAD_FUNC_REGISTRY, open(filename, "wb")) + + +def load_workload_func_registry(filename): + """Load workload function registry from a pickle binary file. + + Parameters + ---------- + filename : str + The filename to load workload function registry from. + """ + global WORKLOAD_FUNC_REGISTRY + + WORKLOAD_FUNC_REGISTRY = pickle.load(open(filename, "rb")) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5a7d00960ecd8f067bbcc1e88f324c6acd707543 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__init__.py @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""The auto-tuning module of tvm + +This module includes: + +* Tuning space definition API + +* Efficient auto-tuners + +* Tuning result and database support + +* Distributed measurement to scale up tuning +""" + +from . import database +from . import feature +from . 
import measure +from . import record +from . import task +from . import tuner +from . import utils +from . import env +from . import tophub + +# some shortcuts +from .measure import ( + measure_option, + MeasureInput, + MeasureResult, + MeasureErrorNo, + LocalBuilder, + LocalRunner, + RPCRunner, +) +from .tuner import callback +from .task import ( + get_config, + create, + ConfigSpace, + ConfigEntity, + register_topi_compute, + register_topi_schedule, + template, + DispatchContext, + FallbackContext, + ApplyHistoryBest as apply_history_best, + ApplyGraphBest as apply_graph_best, + ApplyFixedConfig as apply_fixed_config, +) +from .env import GLOBAL_SCOPE diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a45691b1eff5f10a0d4dc72652c91084e500694 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0186aecada80297c69f136fc61aa294986f1f250 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/database.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/database.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51ea28181a50d75581700234948b8150641e44b6 
Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/database.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/database.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/database.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4865426d777dd453e243082150ca96b1d3138ac3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/database.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/env.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb9a60a9b279e2ac21db72107c8760fa9ff20c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/env.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/env.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/env.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..121a6aa808f079ca252ae76e4c6f37a40cc52d8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/env.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/feature.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/feature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2757c0ab9470a09072877d5b620a1076e5f1c2d4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/feature.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/feature.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/feature.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27daa06514cef213e1b9bacb5b7eb2fdfbdfc684 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/feature.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/record.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/record.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51fea56f8dbdf0921fbc794be34caea295b38003 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/record.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/record.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/record.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0460b409b1a1059d5186cb94a432d55632c7dc39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/record.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/tophub.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/tophub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0203c49a906d52d39709c8e56342d216c2389cd3 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/tophub.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/tophub.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/tophub.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ab050fe7eecf97158f60880bf5fb1fff84ed6dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/tophub.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d9c57c8bae2b3207b9eea1406be8fdbeea1daa4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/utils.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60f2833053c78f64145bf37eb9effdb1f9865323 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/__pycache__/utils.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/database.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/database.py new file mode 100644 index 0000000000000000000000000000000000000000..7246f81d6a59c0d0d22eec2a45747d861f8f652f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/database.py @@ -0,0 +1,204 @@ +# 
Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=consider-using-enumerate, invalid-name, use-list-literal +""" +Database of MeasureInput/MeasureResult pair. +This can be used for replaying measurement. +""" +import os + +from .record import encode, decode, measure_str_key + + +class Database(object): + """ + Base class for a record database object. 
+ """ + + def load(self, inp, get_all=False): + """ + Load a result based on an input's string key + + Parameters + ---------- + inp: MeasureInput + to be translated into key for RedisDB + get_all: bool, optional + Whether the latest result (or all matching results) should be returned + + Returns + ------- + rec: MeasureResult if previously saved, otherwise None + """ + raise NotImplementedError() + + def save(self, inp, res, extend=False): + """ + Save a result based on an input's string key + + Parameters + ---------- + inp: MeasureInput + to be translated into key for RedisDB + res: MeasureResult + to associate with key + extend: + Whether to extend existing MeasureResults if they exist + """ + raise NotImplementedError() + + +def filter_inputs(db, measure_inputs, retry=False): + """ + Filter a measure_inputs batch based on saved db results + + Parameters + ---------- + db: Database + database object + measure_inputs: Array of MeasureInput + measure_inputs as expected in measure_batch + retry: bool + whether to retry if the saved result is a failure + + Returns + ------- + partial_results: Array of MeasureResult + a full list of result, where None denotes no corresponding saved result + unsaved: Array of MeasureInput + a list that only contains unsaved inputs + """ + partial_results = list() + unsaved = list() + for inp in measure_inputs: + res = db.load(inp) + if res is None or (retry and res.error_no != 0): + unsaved.append(inp) + partial_results.append(None) + else: + partial_results.append(res) + return partial_results, unsaved + + +class RedisDatabase(Database): + """ + Redis version of record database + """ + + REDIS_PROD = 15 + REDIS_LOCA = 14 + REDIS_TEST = 13 # for unit test + REDIS_NIGHT_TEMP = 12 # for nightly report (will be flushed after every workload) + + MAGIC_SPLIT = "$" + + def __init__(self, db_index=REDIS_PROD): + # pylint: disable=import-outside-toplevel + import redis + + if db_index == RedisDatabase.REDIS_TEST: + host = "127.0.0.1" + else: 
+ host = os.environ.get("TVM_FLEET_HOST") + self.db = redis.StrictRedis(host=host, port=6379, db=db_index) + self.db_index = db_index + + def set(self, key, value): + self.db.set(key, value) + + def get(self, key): + current = self.db.get(key) + return current.decode() if isinstance(current, bytes) else current + + def load(self, inp, get_all=False): + current = self.get(measure_str_key(inp)) + if current is not None: + records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)] + results = [rec[1] for rec in records if rec is not None] + if get_all: + return results + return max(results, key=lambda result: result.timestamp) + return current + + def save(self, inp, res, extend=False): + current = self.get(measure_str_key(inp)) + if not extend or current is None: + self.set(measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join([encode(inp, res)])) + else: + current = current.split(RedisDatabase.MAGIC_SPLIT) + self.set( + measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join(current + [encode(inp, res)]) + ) + + def filter(self, func): + """ + Dump all of the records that match the given rule + + Parameters + ---------- + func: callable + The signature of the function is (MeasureInput, [MeasureResult]) -> bool + + Returns + ------- + list of records in tuple (MeasureInput, MeasureResult) matching the rule + + Examples + -------- + get records for a target + >>> db.filter(lambda inp, results: "cuda" in inp.target.keys) + get records with errors + >>> db.filter(lambda inp, results: any(r.error_no != 0 for r in results)) + """ + matched_records = list() + # may consider filtering in iterator in the future + for key in self.db.keys(): + current = self.get(key) + try: + records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)] + records = [rec for rec in records if rec is not None] + except TypeError: # got a badly formatted/old format record + continue + + if not records: + continue + inps, results = zip(*records) + inp = inps[0] + if not 
func(inp, results): + continue + result = max(results, key=lambda res: res.timestamp) + matched_records.append((inp, result)) + return matched_records + + def flush(self): + self.db.flushdb() + + +class DummyDatabase(RedisDatabase): + """ + A database based on python dictionary for testing. + """ + + def __init__(self): + # pylint: disable=super-init-not-called + self.db = {} + + def set(self, key, value): + self.db[key] = value + + def flush(self): + self.db = {} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/env.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/env.py new file mode 100644 index 0000000000000000000000000000000000000000..52ec8828bc1e58f9c89233ef7ecb9436e9169b82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/env.py @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=global-variable-not-assigned +"""Global configuration/variable scope for autotvm""" + + +class AutotvmGlobalScope(object): + """The global autotvm scope.""" + + current = None + + def __init__(self): + self._old = AutotvmGlobalScope.current + AutotvmGlobalScope.current = self + + self.in_tuning = False + self.silent = False + + def deep_copy(self, global_scope): + """Deep copy from another instance of AutotvmGlobalScope.""" + self._old = AutotvmGlobalScope.current + + self.in_tuning = global_scope.in_tuning + self.silent = global_scope.silent + + +GLOBAL_SCOPE = AutotvmGlobalScope() + + +def reset_global_scope(global_scope): + """Reset global autotvm state. This is needed to initialize PopenPool workers.""" + global GLOBAL_SCOPE + GLOBAL_SCOPE.deep_copy(global_scope) + AutotvmGlobalScope.current = global_scope diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/feature.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/feature.py new file mode 100644 index 0000000000000000000000000000000000000000..1b66d79d0e5eaac6844c2de71978f14afeb78e21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/feature.py @@ -0,0 +1,214 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name, +"""Extract feature of iter vars + +There are two types of feature +1) Itervar feature + This feature is extracted based on loop variables. + Different loop structures will result in different shapes of feature +2) Curve sample feature (relation feature) + This feature is extracted by sampling relation curve. + This feature is invariant of loop structure. +""" + +import struct +import numpy as np +import tvm._ffi + +from tvm.target import Target +from tvm.driver import build_module + + +def ana_lower(sch, args, binds=None, simple_mode=True): + """Do lower while keeping all axes in IR + i.e. Do not eliminate loop with extent of 1, do not vectorize, unroll or inject virtual threads + """ + sch = sch.normalize() + # Phase 0 + context = tvm.transform.PassContext(config={"tir.debug_keep_trivial_loop": True}) + with context: + mod = build_module.schedule_to_module(sch, args, binds=binds) + + mod = tvm.tir.transform.StorageFlatten(64)(mod._move()) + mod = tvm.tir.transform.Simplify()(mod._move()) + assert simple_mode + return mod["main"].body + + +try: + _get_buffer_curve_sample_flatten = tvm._ffi.get_global_func( + "autotvm.feature.GetCurveSampleFeatureFlatten" + ) + _get_itervar_feature = tvm._ffi.get_global_func("autotvm.feature.GetItervarFeature") + _get_itervar_feature_flatten = tvm._ffi.get_global_func( + "autotvm.feature.GetItervarFeatureFlatten" + ) +except ValueError as e: + + def raise_error(*args, **kwargs): # pylint: disable=unused-argument + raise RuntimeError("Cannot load autotvm c++ API") + + _get_buffer_curve_sample_flatten = ( + _get_itervar_feature + ) = _get_itervar_feature_flatten = raise_error + + +def get_itervar_feature(sch, args, take_log=False): + """get features of iter vars + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + args: Array of te.tensor.Tensor + the buffer args for lower + take_log: bool 
+ whether take log of numerical statics + + Returns + ------- + features of every axis in the IR, see doc/features.md for detail + """ + stmt = ana_lower(sch, args, simple_mode=True) + feas = _get_itervar_feature(stmt, take_log) + + # convert tvm node to python type + ret = [] + for row in feas: + tmp = [] + tmp.append([row[0][0].value, row[0][1]]) + for item in row[1:]: + tmp.append([item[0].value] + [x.value for x in item[1:]]) + ret.append(tmp) + return ret + + +def flatten_itervar_feature(fea): + """flatten features into one-dimensional feature vectors + + Parameters + ---------- + fea: list + return value of get_itervar_feature + + Returns + ------- + flatten_feature: np.ndarray + one-dimensional vector + """ + flatten = [] + for axis in fea: + for pair in axis[1:]: + flatten.append(pair[1:]) + return np.concatenate(flatten) + + +def get_itervar_feature_flatten(sch, args, take_log=True): + """get flatten features of iter vars + this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster. + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + args: Array of te.tensor.Tensor + the buffer args for lower + take_log: bool + whether take log of numerical statics + + Returns + ------- + flatten_feature: np.ndarray + one-dimensional vector + """ + stmt = ana_lower(sch, args, simple_mode=True) + feas = _get_itervar_feature_flatten(stmt, take_log) + feas = struct.unpack(f"{len(feas) // 4}f", feas) + return feas + + +def get_flatten_name(fea): + """Get names of feature after flatten. 
+ + Parameters + ---------- + fea: list or str + return value of get_itervar_feature or a line of logfile + + Returns + ------- + feature_names: Array of str + """ + + feature_name = { + "_attr_": ["length", "nest_level", "topdown", "bottomup"] + [f"ann_{i}" for i in range(20)], + "_arith_": ["add", "mul", "div"], + "buf_touch": ["stride", "mod", "count", "reuse", "T_count", "T_reuse"], + } + + if isinstance(fea, str): + # pylint: disable=import-outside-toplevel + from .record import decode + + # flatten line to feature + line = fea + ret = decode(line) + if ret is None: + raise ValueError("Unsupported AutoTVM log format") + inp, _ = ret + target = Target(inp.target) + with target: + s, args = inp.template.instantiate(inp.config) + fea = get_itervar_feature(s, args) + + names = [] + ct = 0 + for row in fea: + var_name = str(row[0][1]) + for pair in row[1:]: + key = pair[0] + if key in feature_name: + name_list = feature_name[key] + else: + name_list = feature_name["buf_touch"] + + for i in range(len((pair[1:]))): + names.append(".".join([f"f{ct}", var_name, key, name_list[i]])) + ct += 1 + return names + + +def get_buffer_curve_sample_flatten(sch, args, sample_n=30): + """ + Get flatten curve sample feature (relation feature) + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + args: Array of te.tensor.Tensor + the buffer args for lower + sample_n: int + number of sample points along one dimension + + Returns + ------- + flatten_feature: np.ndarray + one-dimensional vector + """ + stmt = ana_lower(sch, args, simple_mode=True) + feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False) + feas = struct.unpack(f"{len(feas) // 4}f", feas) + return feas diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..d590db0e7c48e2b7bc5a66e850b80c0d471de692 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__init__.py @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Autotvm graph tuner API.""" +from __future__ import absolute_import as _abs + +from . import _base +from . 
import base_graph_tuner + +from .base_graph_tuner import BaseGraphTuner +from .dynamic_programming_tuner import DPTuner +from .pbqp_tuner import PBQPTuner diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82894da040ea22375340091d17302667058e0634 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3acdbca687c63048404a43f0c7645a20ad1b0da Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/base_graph_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/base_graph_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77d6e44123b68dd2c3451e9453ef5c8a3cc2691e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/base_graph_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/dynamic_programming_stage.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/dynamic_programming_stage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caa0841dcc377ea7e5a3be8dbe9a12689bdc05e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/dynamic_programming_stage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/dynamic_programming_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/dynamic_programming_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21ef71ba758ac08cf17472de6d04eb8f30271ee4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/dynamic_programming_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/pbqp_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/pbqp_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9af2ca52b083ea5b8beef6c6c7d764bd7566947d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/__pycache__/pbqp_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/_base.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..ae220bb5e2f86f5ba953606834857e67e38c4236 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/_base.py @@ -0,0 +1,27 @@ +# 
Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Helper functions and global data""" + + +# We set a large time to represent an invalid layout-transformation. +# This number is set to be 10e9 seconds to align with autotvm. +INVALID_LAYOUT_TIME = 10e9 + +MAX_OUTPUT_NODES = 16 + +OPT_OUT_OP = ["layout_transform"] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/base_graph_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..7e975201c86adcf8a9eb2a5f35b7edc3cbab7432 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/base_graph_tuner.py @@ -0,0 +1,591 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-instance-attributes,too-many-branches,too-many-nested-blocks,invalid-name,unused-argument,unused-variable,no-member,no-value-for-parameter +"""Base class for graph tuner.""" +import logging +from abc import abstractmethod + +import numpy as np +from tvm import topi + +import tvm +from tvm import te +from tvm import autotvm, relay +from tvm.autotvm.task import get_config +from tvm.autotvm.record import encode, load_from_file +from tvm.autotvm.measure import MeasureResult, MeasureInput +from tvm.target import Target + +from ...target import Target +from .utils import ( + is_boundary_node, + get_in_nodes, + get_out_nodes, + has_multiple_inputs, + bind_inputs, + expr2graph, +) +from ._base import INVALID_LAYOUT_TIME + +from ._base import OPT_OUT_OP + + +def get_infer_layout(task_name): + if task_name.startswith("conv2d"): + return topi.nn.conv2d_infer_layout + if task_name.startswith("depthwise_conv2d"): + return topi.nn.depthwise_conv2d_infer_layout + raise ValueError(f"Cannot find infer layout for task {task_name}") + + +@autotvm.template("layout_transform") +def layout_transform(*args): + """Autotvm layout transform template.""" + cfg = get_config() + cfg.add_flop(-1) + data = args[0] + out = topi.layout_transform(*args) + sch = topi.generic.schedule_injective([out]) + return sch, [data, out] + + +class BaseGraphTuner(object): + """Class to search schedules considering both kernel execution time and + layout transformation time. 
+ + Before creating a Graph Executor instance, schedule candidates for all kernels in + graph should be provided through tensor-level tuning. + """ + + def __init__( + self, + graph, + input_shapes, + records, + target_ops, + target, + max_sch_num=20, + dtype="float32", + verbose=True, + log_file="graph_tuner.log", + log_level=logging.DEBUG, + name="graph_tuner", + ): + """Create a GlobalTuner instance. Local schedule searching for all nodes with + target_op in the input graph and layout transformation benchmark need to be + executed before initialization. + + graph : tvm.relay.function.Function + Input graph + + input_shapes : dict of str to tuple. + Input shapes of graph + + records : str or iterator of (MeasureInput, MeasureResult) + Collection of kernel level tuning records. + If it is str, then it should be the filename of a records log file. + Each row of this file is an encoded record pair. + Otherwise, it is an iterator. + + target_ops : List of tvm.ir.Op + Target tuning operators. + + target : str or tvm.target + Compilation target. + + max_sch_num : int, optional + Maximum number of schedule candidates for each workload. + + dtype : str, optional + Data type. + + log_file : str, optional + graph tuner log file name + + name : str, optional + Name of global tuner. 
+ """ + self._node_list = [] + self._layout_transform_perf_records = {} + self._layout_transform_interlayer_cost = {} + self._input_shapes = input_shapes + self._target_ops = target_ops + + self._name = name + self._max_sch_num = max_sch_num + self._optimal_sch_dict = {} + self._records = records + self._dtype = dtype + if isinstance(target, str): + target = Target(target) + self._target = target + self._optimal_record_dict = {} + + # Set up logger + self._verbose = verbose + self._logger = logging.getLogger(name + "_logger") + need_file_handler = need_console_handler = True + for handler in self._logger.handlers: + if handler.__class__.__name__ == "FileHandler": + need_file_handler = False + if handler.__class__.__name__ == "StreamHandler": + need_console_handler = False + self._log_level = log_level + self._log_file = log_file + self._formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s") + self._logger.setLevel(log_level) + if need_file_handler: + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter(self._formatter) + self._logger.addHandler(file_handler) + if self._verbose and need_console_handler: + console_handler = logging.StreamHandler() + console_handler.setFormatter(self._formatter) + self._logger.addHandler(console_handler) + self._logger.setLevel(log_level) + self._logger.propagate = False + + # Generate workload and schedule dictionaries. 
+ if isinstance(graph, tvm.IRModule): + graph = graph["main"] + + if isinstance(graph, relay.function.Function): + node_dict = {} + graph = bind_inputs(graph, input_shapes, dtype) + expr2graph(graph, self._target_ops, node_dict, self._node_list, target) + else: + raise RuntimeError(f"Unsupported graph type: {type(graph)}") + + self._graph = graph + self._in_nodes_dict = get_in_nodes(self._node_list, self._target_ops, input_shapes.keys()) + if len(self._in_nodes_dict) == 0: + raise RuntimeError( + f"Could not find any input nodes with whose " + f"operator is one of {self._target_ops}" + ) + self._out_nodes_dict = get_out_nodes(self._in_nodes_dict) + self._fetch_cfg() + self._opt_out_op = OPT_OUT_OP + + # Setup infer_layout for elemwise-like nodes + # Note: graph tuner currently only supports tuning of single input and single output + # op as target op, such as conv2d, dense and conv2d_transpose. In this case, we can + # reuse infer_layout function from target ops for elemwise-like nodes. The behavior + # is to modify the first tensor shape of input workload to the output shape of + # elemwise-like node, and use infer_layout function from input op to generate layouts. 
+ input_names = self._input_shapes.keys() + for idx in sorted(self._in_nodes_dict.keys()): + if has_multiple_inputs(self._node_list, idx, input_names, self._opt_out_op): + node_entry = self._node_list[idx] + node_entry["topi_op"] = [] + node_entry["workloads"] = [] + for input_idx in self._in_nodes_dict[idx]: + input_node = self._node_list[input_idx] + if not is_boundary_node(input_node, input_names): + input_topi_op = input_node["topi_op"][0] + node_entry["topi_op"].append(input_topi_op) + # Only replace the first input tensor + input_workload = input_node["workloads"][0] + first_tensor = input_workload[1] + dtype = first_tensor[-1] + new_shape = tuple([val.value for val in node_entry["types"][0].shape]) + actual_workload = ( + (input_workload[0],) + + (("TENSOR", new_shape, dtype),) + + input_workload[2:] + ) + node_entry["workloads"].append(actual_workload) + if "record_candidates" not in node_entry: + node_entry["record_candidates"] = input_node["record_candidates"] + else: + node_entry["topi_op"].append(None) + node_entry["workloads"].append(None) + + def _fetch_cfg(self): + """Read and pre-process input schedules.""" + if isinstance(self._records, str): + records = load_from_file(self._records) + else: + records = self._records + cfg_dict = {} + for record in records: + in_measure, _ = record + workload = in_measure.task.workload + if workload not in cfg_dict: + cfg_dict[workload] = [] + cfg_dict[workload].append(record) + + cache_dict = {} + for key in self._in_nodes_dict: + node_entry = self._node_list[key] + if node_entry["op"] not in self._target_ops: + continue + workload = node_entry["workloads"][0] + if workload in cache_dict: + node_entry["record_candidates"] = cache_dict[workload] + continue + record_candidates = [] + infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) + layout_tracking_dict = {} + for record in cfg_dict[workload]: + in_measure, out_measure = record + workload = in_measure.task.workload + cfg = in_measure.config + # For 
multiple cfgs which produces the same in/out layouts, + # only the most efficient one is preserved. + with self._target: + layouts = infer_layout_func(workload, cfg) + if layouts in layout_tracking_dict: + cost = out_measure.costs[0] + current_best_cost = layout_tracking_dict[layouts][1].costs[0] + if cost < current_best_cost: + layout_tracking_dict[layouts] = record + else: + layout_tracking_dict[layouts] = record + sorted_records = sorted( + layout_tracking_dict.values(), key=lambda item: item[1].costs[0] + ) + for i in range(min(self._max_sch_num, len(sorted_records))): + record_candidates.append(sorted_records[i]) + node_entry["record_candidates"] = record_candidates + cache_dict[workload] = record_candidates + + def _iterate_layout_transform(self, callback): + """Iterate all possible layout transformations and execute callback for each + iteration. callback function accepts 6 arguments: from_node_idx, to_node_idx, + from_sch_idx, to_sch_idx, args which represent the argument list of layout + transformation and is_valid showing whether this is a valid layout transformation. 
+ """ + input_names = self._input_shapes.keys() + pair_tracker = set() + for key, val in self._in_nodes_dict.items(): + node_entry = self._node_list[key] + target_input_idx = -1 + target_input_pos = -1 + if has_multiple_inputs(self._node_list, key, input_names, self._opt_out_op): + for i, item in enumerate(val): + node = self._node_list[item] + if not is_boundary_node(node, input_names): + target_input_idx = item + target_input_pos = i + break + + for i, item in enumerate(val): + i_idx = item + in_node_entry = self._node_list[i_idx] + if is_boundary_node(in_node_entry, input_names): + continue + + if node_entry["op"] in self._target_ops: + o_idx = key + o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) + o_wkl = node_entry["workloads"][0] + i_topi_op = in_node_entry["topi_op"][0] + i_wkl = in_node_entry["workloads"][0] + pivot = 0 + while not i_wkl: + pivot += 1 + i_topi_op = in_node_entry["topi_op"][pivot] + i_wkl = in_node_entry["workloads"][pivot] + i_infer_layout_func = get_infer_layout(i_topi_op) + else: + o_idx = target_input_idx + if i <= target_input_pos: + continue + o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) + o_wkl = node_entry["workloads"][target_input_pos] + i_infer_layout_func = get_infer_layout(node_entry["topi_op"][i]) + i_wkl = node_entry["workloads"][i] + + if (i_idx, o_idx) in pair_tracker: + continue + pair_tracker.add((i_idx, o_idx)) + + for m, i_record in enumerate(in_node_entry["record_candidates"]): + for n, o_record in enumerate(node_entry["record_candidates"]): + i_cfg, o_cfg = i_record[0].config, o_record[0].config + with self._target: + i_input_info, i_output_info = i_infer_layout_func(i_wkl, i_cfg) + o_input_info, o_output_info = o_infer_layout_func(o_wkl, o_cfg) + if ( + len(i_input_info) > 1 + or len(i_output_info) > 1 + or len(o_input_info) > 1 + or len(o_output_info) > 1 + ): + raise RuntimeError( + "Graph tuner only supports target operator " + "with single input and single output. 
" + "Please check target_ops argument." + ) + + in_shape, in_layout = i_output_info[0] + if node_entry["op"] in self._target_ops: + _, out_layout = o_input_info[0] + else: + _, out_layout = o_output_info[0] + data_placeholder = te.placeholder(in_shape, name="data", dtype=self._dtype) + args = [data_placeholder, in_layout, out_layout] + callback(i_idx, o_idx, m, n, args) + + def _create_matrix_callback(self, from_node_idx, to_node_idx, from_sch_idx, to_sch_idx, args): + """Create dictionary containing matrix format of layout transformation + between nodes.""" + in_layout, out_layout = args[1], args[2] + ltf_workload = autotvm.task.args_to_workload(args, "layout_transform") + idx_pair_key = (from_node_idx, to_node_idx) + + if in_layout == out_layout: + layout_transform_time = 0 + else: + layout_transform_time = self._layout_transform_perf_records[ltf_workload][1].costs[0] + + if idx_pair_key not in self._layout_transform_interlayer_cost: + self._layout_transform_interlayer_cost[idx_pair_key] = [] + if len(self._layout_transform_interlayer_cost[idx_pair_key]) <= from_sch_idx: + self._layout_transform_interlayer_cost[idx_pair_key].append([]) + self._layout_transform_interlayer_cost[idx_pair_key][from_sch_idx].append( + layout_transform_time + ) + + def benchmark_layout_transform( + self, + min_exec_num=100, + timeout=10, + use_rpc=False, + device_key=None, + host="127.0.0.1", + port=9190, + n_parallel=1, + build_func="default", + layout_records=None, + target_host=None, + infer_layout=False, + runner=None, + ): + """Benchmark all possible layout transformation in the graph, + given a set of schedule candidates for each workload of target operator. + + Parameters + ---------- + min_exec_num : int, optional + Minimum number of execution. Final execution time is the average of + all execution time. + + timeout : int, optional + Time out for each execution. + + use_rpc : boolean, optional + Whether to use rpc mode for benchmarking. 
+ + device_key : str, optional + Remote device key which can be queried by + python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 + + host : str, optional + IP address used to create RPC tracker on host machine. + + port : int, optional + Port number used to create RPC tracker on host machine. + + n_parallel: int, optional + The number of measurement task that can run in parallel. + Set this according to the number of cpu cores (for compilation) and + the number of devices you have (for measuring generate code). + + build_func: str or callable, optional + 'default': call default builder. This works for normal target (llvm, cuda) + + 'ndk': use Android NDK to create shared library. Use this for android target. + + callable: customized build function for other backends (e.g. VTA). + See autotvm/measure/measure_methods.py::default_build_func for example. + + layout_records : str or iterator of (MeasureInput, MeasureResult). optional + Collection of layout_transform benchmarking records. + If is str, then it should be the filename of a records log file. + Each row of this file is an encoded record pair. + Otherwise, it is an iterator. + + If this argument is set, graph tuner will first check whether layout_transform + workload already exists in records and skip benchmarking if possible. + + target_host : str, optional + str or :any:`tvm.target.Target` optional + Host compilation target, if target is device. + When TVM compiles device specific program such as CUDA, + we also need host(CPU) side code to interact with the driver + setup the dimensions and parameters correctly. + target_host is used to specify the host side codegen target. + By default, llvm is used if it is enabled, + otherwise a stackvm intepreter is used. + + infer_layout : bool, optional + Whether to infer layout transformation time if it doesn't exist in records, instead + of benchmarking on target device. 
+ + This might bring performance loss comparing to benchmarking layout transformation. + runner : Runner, optional + Accept a user-supplied runner + """ + self._logger.info("Start to benchmark layout transformation...") + self._target, target_host = Target.canon_target_and_host(self._target, target_host) + + if layout_records is None and infer_layout: + raise RuntimeError("Requires some records to infer layout transformation time.") + + if isinstance(layout_records, str): + layout_records = load_from_file(layout_records) + if not layout_records and infer_layout: + raise RuntimeError("Records must be non-empty to infer layout transformation time.") + + if isinstance(layout_records, str): + layout_records = load_from_file(layout_records) + num_flops, total_time = 0, 0 + if layout_records is not None: + for record in layout_records: + ltf_wkl = record[0].task.workload + self._layout_transform_perf_records[ltf_wkl] = record + input_shape = ltf_wkl[1][1] + flops = np.prod(input_shape) + num_flops += flops + total_time += record[1].costs[0] + avg_time = total_time / num_flops if num_flops > 0 else 0 + + args_list = [] + + def _fetch_args_callback(from_node_idx, to_node_idx, from_sch_idx, to_sch_idx, args): + """Callback function to fetch layout transform args""" + _, in_layout, out_layout = args + if in_layout != out_layout: + args_list.append(args) + + self._iterate_layout_transform(_fetch_args_callback) + + def _log_to_list(record_list): + """Callback to log result to a list.""" + + def _callback(_, inputs, results): + """Callback implementation""" + record_list.append((inputs[0], results[0])) + + return _callback + + builder = autotvm.LocalBuilder(n_parallel=n_parallel, build_func=build_func) + if use_rpc: + if device_key is None: + raise RuntimeError("device_key need to be set to use rpc tracker mode.") + runner = autotvm.measure.RPCRunner( + device_key, + host, + port, + n_parallel=n_parallel, + number=min_exec_num, + repeat=1, + timeout=timeout, + ) + elif not 
runner: + runner = autotvm.LocalRunner(number=min_exec_num, repeat=1, timeout=timeout) + measure_option = autotvm.measure_option(builder=builder, runner=runner) + for args in args_list: + data, in_layout, out_layout = args + ltf_workload = autotvm.task.args_to_workload(args, "layout_transform") + if ltf_workload in self._layout_transform_perf_records: + continue + + if infer_layout: + input_shape = ltf_workload[1][1] + flops = 1 + for i in input_shape: + flops *= i + + # Rule out invalid layout transformations + out = topi.layout_transform(data, in_layout, out_layout) + out_flops = 1 + for i in topi.utils.get_const_tuple(out.shape): + out_flops *= i + + if flops != out_flops: + inferred_time = INVALID_LAYOUT_TIME + else: + inferred_time = flops * avg_time + + record_input = MeasureInput(target=self._target, task=None, config=None) + record_output = MeasureResult( + costs=(inferred_time,), error_no=0, all_cost=-1, timestamp=-1 + ) + self._layout_transform_perf_records[ltf_workload] = (record_input, record_output) + continue + + records = [] + task = autotvm.task.create("layout_transform", args=args, target=self._target) + tuner = autotvm.tuner.GridSearchTuner(task) + tuner.tune(n_trial=1, measure_option=measure_option, callbacks=[_log_to_list(records)]) + if not isinstance(records[0][1].costs[0], float): + records[0] = (records[0][0], records[0][1]._replace(costs=(INVALID_LAYOUT_TIME,))) + self._layout_transform_perf_records[ltf_workload] = records[0] + + self._iterate_layout_transform(self._create_matrix_callback) + self._logger.info("Benchmarking layout transformation successful.") + + @property + def layout_transform_perf_records(self): + """Get layout transformation dictionary for input graph. + + Returns + ------- + layout_transform_perf_records : dict of tuple to (MeasureInput, MeasureResult) + Layout transformation dictionary for input graph. 
+ """ + return self._layout_transform_perf_records + + def get_optimal_records(self): + """Convert optimal record dictionary to a list of records + with ascending order of node index in graph. + + Returns + ------- + sch_list : list of tuple + List of records with ascending order of node index in graph. + """ + ordered_index_list = sorted(self._optimal_record_dict.keys()) + ret = [] + for index in ordered_index_list: + node_entry = self._node_list[index] + if node_entry["op"] not in self._target_ops: + continue + ret.append(node_entry["record_candidates"][self._optimal_record_dict[index]]) + return ret + + def write_opt_sch2record_file(self, record_file="graph_opt_schedule.log"): + """Write graph level optimal schedules into file. + + Parameters + ---------- + record_file : str, optional + Output schedule file. + """ + with open(record_file, "a") as out_file: + records = self.get_optimal_records() + for record in records: + out_file.write(encode(record[0], record[1]) + "\n") + msg = f"Writing optimal schedules to {record_file} successfully." + self._logger.info(msg) + + @abstractmethod + def run(self, **kwargs): + """Run graph tuning.""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py new file mode 100644 index 0000000000000000000000000000000000000000..2d7560272e6d1d2c296085e4a024fa36489916cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py @@ -0,0 +1,371 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=too-many-instance-attributes,too-many-branches,too-many-statements,too-many-arguments,too-many-locals,invalid-name +"""Stage class for dynamic programming tuner""" +import numpy as np + +from .utils import is_boundary_node + + +class DPStage(object): + """Class to represent node in Markov decision process. A stage has states + to represent different schedules of the current node. Since in this problem + the action is the schedule selected for current node, action can be fully + represented by states. No extra attribute needs for action. + + In most cases, instance of this class should be created through DPTuner. + """ + + def __init__( + self, + idx, + input_shapes, + node_list, + counted_nodes_set, + layout_transform_interlayer_cost, + stage_dict, + in_nodes_dict, + out_nodes_dict, + dep_dict, + target_ops, + dtype="float32", + ): + """Initialize a stage and create all states. + + Parameters + ---------- + idx : int + Index for current node. + + input_shapes : dict of string to tuple of int + Input shapes for current graph. + + node_list : list of dict + List of all nodes for current graph. + + counted_nodes_set : set of int + Global set recording whether the execution time of a node has been counted. + + layout_transform_interlayer_cost : dict of tuple to list + Dictionary maps node index pair to layout transformation time between them. 
+ + stage_dict : dict of int to Stage + Global dictionary for all stages mapping node index to stage. + + in_nodes_dict : dict of int to list of int + Dictionary maps node index to corresponding input node index. + + out_nodes_dict : dict of int to list of int + Dictionary maps node index to corresponding output node index. + + dep_dict : dict of int to set of int + Dictionary maps node index to dependent node index. + + target_ops : list of str + Target operators + + dtype : str, optional + Data type. + """ + self._global_input_shapes = input_shapes + self._global_input_names = input_shapes.keys() + self._global_node_list = node_list + self._global_counted_nodes_set = counted_nodes_set + self._global_layout_transform_interlayer_cost = layout_transform_interlayer_cost + self._global_stage_dict = stage_dict + self._global_in_nodes_dict = in_nodes_dict + self._global_out_nodes_dict = out_nodes_dict + self._global_dep_dict = dep_dict + + self._idx = idx + self._node_entry = self._global_node_list[idx] + self._target_ops = target_ops + self._wkl = self._node_entry["workloads"][0] + self._record_list = self._node_entry["record_candidates"] + self._dep = [] + self._dtype = dtype + self._states = None + self._full_states = None + self._full_states_idx = None + self._create_states() + + def _create_states(self): + """Create states.""" + node = self._global_node_list[self._idx] + if node["op"] in self._target_ops: + self._create_op_states() + else: + self._create_multi_inputs_states() + + def _create_op_states(self): + """State creation routine for nodes with target_op.""" + input_idx = self._global_in_nodes_dict[self._idx][0] + input_node_entry = self._global_node_list[input_idx] + if is_boundary_node(input_node_entry, self._global_input_names): + self._full_states = np.array([record[1].costs[0] for record in self._record_list]) + self._states = self._full_states + else: + input_stage = self._global_stage_dict[input_idx] + input_dep = input_stage.dep + input_states = 
input_stage.states + input_flatten_states = input_states.flatten() + input_record_list = input_node_entry["record_candidates"] + num_schedules = len(self._record_list) + num_input_schedules = len(input_record_list) + num_input_states = input_flatten_states.shape[0] + + full_states_shape = tuple( + [num_schedules, num_input_schedules] + + [ + len(self._global_node_list[dep_idx]["record_candidates"]) + for dep_idx in input_dep + ] + ) + self._full_states = np.zeros(full_states_shape).flatten().astype("float32") + self._full_states_idx = [self._idx, input_idx] + input_dep + dep_multiplier = 1 + for i in range(2, len(full_states_shape)): + dep_multiplier *= full_states_shape[i] + input_node_time_counted = input_idx in self._global_counted_nodes_set + + for i in range(num_schedules): + current_sch_time = float(self._record_list[i][1].costs[0]) + for j in range(num_input_states): + input_sch_idx = j // dep_multiplier + layout_transform_time = self._global_layout_transform_interlayer_cost[ + (input_idx, self._idx) + ][input_sch_idx][i] + + if input_node_time_counted: + total_time = current_sch_time + layout_transform_time + else: + total_time = ( + current_sch_time + layout_transform_time + input_flatten_states[j] + ) + current_state_idx = i * num_input_states + j + self._full_states[current_state_idx] = total_time + + if not input_node_time_counted: + self._global_counted_nodes_set.add(input_idx) + self._full_states = self._full_states.reshape(full_states_shape) + + # If out degree of input node is 1, we can remove the dimension of input node, + # since the states of input node will not be needed any more. Otherwise, input + # node should become a dependency. + if len(self._global_out_nodes_dict[input_idx]) == 1: + self._states = np.amin(self._full_states, axis=1) + self._dep = list(input_dep) + else: + self._states = self._full_states + self._dep = [ + input_idx, + ] + input_dep + + # Update global dependency dictionary. 
+ # This is to monitor the dependency states to decide + # when a dependency can be eliminated, so that total + # number of states can be largely reduced. + for dep_idx in self._dep: + self._global_dep_dict[dep_idx].remove(self._idx) + for child in self._global_out_nodes_dict[self._idx]: + self._global_dep_dict[dep_idx].add(child) + if len(self._global_out_nodes_dict[self._idx]) > 1: + self._global_dep_dict[self._idx] = set() + for child in self._global_out_nodes_dict[self._idx]: + self._global_dep_dict[self._idx].add(child) + + def _create_multi_inputs_states(self): + """State creation routine for multi_input operator + + In tvm, layout transformation for an elemwise-like follow the rule which + all input operators transform their layouts to the leftmost input operator + layout. For example: + elemwise-sum + | | | + | | | + op0 op1 op2 + In this block, the possible layout transformations are: op1 -> op0 and op2 -> op0. + In graph tuning, a 3-D array with shape (k0, k1, k2) can represent the layout + transformations between these three nodes. It is also possible some earlier states + belong to other nodes(We name them as dependency) are required for dynamic programming. + The final states array for this elemwise-sum can be with shape (e0, k0, k1, e1, k2). + To iterate through all states, we first align the shape of op0, op1 and op2 to be + (e0, k0, k1, e1, k2) by broadcasting the original states. We also record the axis of + each input node in the states array, together with the multiplier. For example, + the axis index for op0 is 1, and multiplier is k1 * e1 * k2. If current iterating index + in the flatten array is i, the index of op0 can be computed as: + i % (k0 * k1 * e1 * k2) // (k1 * e1 * k2). 
+ """ + full_input_node_list = list(self._global_in_nodes_dict[self._idx]) + input_index_list = [] + # Remove input and ruled_out nodes + for input_idx in full_input_node_list: + input_node = self._global_node_list[input_idx] + if not is_boundary_node(input_node, self._global_input_names): + input_index_list.append(input_idx) + + # Generate new states + states_list, aligned_node_list = DPStage.align_states( + input_index_list, self._global_stage_dict, self._global_node_list + ) + target_node_idx, target_major_axis, target_multiplier, target_states = states_list[0] + aligned_shape = target_states.shape + self._full_states = np.zeros(aligned_shape).astype("float32").flatten() + self._full_states_idx = list(aligned_node_list) + num_states = self._full_states.shape[0] + node_time_counted = [item[0] in self._global_counted_nodes_set for item in states_list] + target_states = target_states.flatten() + src_states_list = [states_list[i][3].flatten() for i in range(1, len(states_list))] + + for i in range(num_states): + target_sch_idx = ( + i % (target_multiplier * aligned_shape[target_major_axis]) + ) // target_multiplier + if node_time_counted[0]: + new_state = 0 + else: + new_state = target_states[i] + + for j in range(1, len(states_list)): + src_states = src_states_list[j - 1] + src_node_idx, src_major_axis, src_multiplier, _ = states_list[j] + src_sch_idx = ( + i % (src_multiplier * aligned_shape[src_major_axis]) + ) // src_multiplier + layout_transform_time = self._global_layout_transform_interlayer_cost[ + (src_node_idx, target_node_idx) + ][src_sch_idx][target_sch_idx] + + if node_time_counted[j]: + new_state += layout_transform_time + else: + new_state += layout_transform_time + src_states[i] + self._full_states[i] = new_state + + for i, node_counted in enumerate(node_time_counted): + if not node_counted: + self._global_counted_nodes_set.add(states_list[i][0]) + self._full_states = self._full_states.reshape(aligned_shape) + + # Remove dependency to reduce states + 
reduced_states = np.array(self._full_states) + reduced_states_transpose = [states_list[0][1]] + reduced_states_dep_list = [] + self._dep = [] + for i in range(len(reduced_states.shape)): + if i != states_list[0][1]: + reduced_states_transpose.append(i) + reduced_states_dep_list.append(aligned_node_list[i]) + reduced_states = np.transpose(reduced_states, reduced_states_transpose) + shift = 0 + for i, dep in enumerate(reduced_states_dep_list): + if dep not in self._global_dep_dict or len(self._global_dep_dict[dep]) == 1: + self._global_dep_dict.pop(dep, None) + reduced_states = np.amin(reduced_states, axis=i + 1 - shift) + shift += 1 + else: + self._dep.append(dep) + self._states = reduced_states + + # Update dependency + for dep in self._dep: + self._global_dep_dict[dep].remove(self._idx) + for child in self._global_out_nodes_dict[self._idx]: + self._global_dep_dict[dep].add(child) + if len(self._global_out_nodes_dict[self._idx]) > 1: + self._global_dep_dict[self._idx] = set() + for child in self._global_out_nodes_dict[self._idx]: + self._global_dep_dict[self._idx].add(child) + + @property + def dep(self): + """Get dependency list.""" + return self._dep + + @property + def states(self): + """Get states.""" + return self._states + + @property + def full_states(self): + """Get complete states.""" + return self._full_states + + @property + def full_states_idx(self): + """Get node index of complete states.""" + return self._full_states_idx + + @staticmethod + def align_states(input_index_list, stage_dict, node_list): + """Align all input node states shapes to be the same and transpose/reshape properly. + + This is used in creating multi_input operator states. + + Parameters + ---------- + input_index_list : list of int + List of input node index. + + stage_dict : dict of int to Stage + Global dictionary of node index to stage. + + node_list : list of dict + List of all nodes for current graph. + + Returns + ------- + states_list : list of tuple + List of aligned states. 
+ + aligned_node_list : list in int + List of node index for aligned states. + """ + aligned_node_list = list(input_index_list) + states_list = [] + for input_idx in input_index_list: + input_node_stage = stage_dict[input_idx] + for dep_idx in input_node_stage.dep: + if dep_idx not in aligned_node_list: + aligned_node_list.append(dep_idx) + aligned_shape = [] + for idx in aligned_node_list: + aligned_shape.append(len(node_list[idx]["record_candidates"])) + for input_idx in input_index_list: + input_node_stage = stage_dict[input_idx] + input_node_shape_idx_list = [input_idx] + input_node_stage.dep + transpose_idx_list = [] + reshape_list = [] + major_axis = -1 + for i, idx in enumerate(aligned_node_list): + if input_idx == idx: + major_axis = i + if idx in input_node_shape_idx_list: + transpose_idx_list.append(idx) + reshape_list.append(aligned_shape[i]) + else: + reshape_list.append(1) + transpose_list = [input_node_shape_idx_list.index(idx) for idx in transpose_idx_list] + input_node_states = np.transpose(input_node_stage.states, tuple(transpose_list)) + input_node_states = np.reshape(input_node_states, tuple(reshape_list)) + input_node_states = np.broadcast_to(input_node_states, aligned_shape) + multiplier = 1 + for i in range(major_axis + 1, len(aligned_shape)): + multiplier *= aligned_shape[i] + states_list.append((input_idx, major_axis, multiplier, input_node_states)) + return states_list, aligned_node_list diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..97253e406be18847d141a14279b19fc9ccff8c41 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py @@ -0,0 +1,208 @@ +# Licensed to the Apache Software Foundation (ASF) 
under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=import-error,too-many-locals,too-many-statements,too-many-branches,unused-variable +"""Dynamic programming tuner.""" +import sys +import numpy as np + +from ._base import MAX_OUTPUT_NODES +from .base_graph_tuner import BaseGraphTuner +from .dynamic_programming_stage import DPStage +from .utils import has_multiple_inputs, is_boundary_node + +if sys.version_info[0] == 3: + import queue +else: + import Queue as queue + + +class DPTuner(BaseGraphTuner): + """Tuner which uses dynamic programming to solve MDP problem. + + Note: currently dynamic programming is used to solve this MDP problem. However, + this problem is intrinsically non-polynomial. DP can't apply for more complicated + models, such as networks with many element-wise sum operators. In this case, switch + to heuristic algorithm such as PBQP tuner. 
+ """ + + def __init__(self, *args, **kwargs): + """Create a dynamic programming tuner.""" + super(DPTuner, self).__init__(*args, **kwargs) + self._num_states = self._max_num_states = None + self._stage_dict = {} + self._dep_dict = {} + self._counted_nodes_set = set() + + self._global_data_dict = { + "dtype": self._dtype, + "counted_nodes_set": self._counted_nodes_set, + "stage_dict": self._stage_dict, + "in_nodes_dict": self._in_nodes_dict, + "out_nodes_dict": self._out_nodes_dict, + "dep_dict": self._dep_dict, + "node_list": self._node_list, + "input_shapes": self._input_shapes, + "layout_transform_interlayer_cost": self._layout_transform_interlayer_cost, + } + + def _check_num_states(self, num_states): + """Track the number of states.""" + self._num_states += num_states + if self._max_num_states is not None: + if self._num_states > self._max_num_states: + raise RuntimeError( + "Too many states detected while running dynamic " + "programming: got %d states but upper limit is %d." + % (self._num_states, self._max_num_states) + ) + + def _forward(self): + """Forward pass in DP to generate states for all stages.""" + self._logger.info("Start forward pass...") + for node_idx in sorted(self._in_nodes_dict.keys()): + stage = DPStage(idx=node_idx, target_ops=self._target_ops, **self._global_data_dict) + self._check_num_states(stage.full_states.size) + self._stage_dict[node_idx] = stage + self._logger.info("Finished forward pass.") + + def _backward(self): + """Backward pass in DP to generate optimal solution.""" + self._logger.info("Start backward pass...") + input_names = self._input_shapes.keys() + optimal_record_dict = {} + # Pick optimal schedule for output nodes + output_idx_list = [] + for key, val in self._out_nodes_dict.items(): + if not val: + output_idx_list.append(key) + + # Restrict number of output nodes to avoid numpy reshape error + if len(output_idx_list) > MAX_OUTPUT_NODES: + msg = ( + "The number of outputs in graph is larger than upper " + "limit: %s 
vs %s. Usually this is caused by too many " + "LAYOUT_FIXED_OP in graph. Switch to greedily select schedule." + "No action required at this moment. We will continuously improve graph tuner" + % (len(output_idx_list), MAX_OUTPUT_NODES) + ) + self._logger.warning(msg) + self._optimal_record_dict = {key: 0 for key in self._in_nodes_dict} + return + + states_list, aligned_node_list = DPStage.align_states( + output_idx_list, self._stage_dict, self._node_list + ) + num_states = states_list[0][3].size + self._check_num_states(num_states * len(output_idx_list)) + aligned_node_shape = states_list[0][3].shape + min_time = 0 + min_pos = -1 + for states in states_list: + min_time += np.amax(states[3]) + flatten_states_list = [current_states[3].flatten() for current_states in states_list] + for i in range(num_states): + current_time = 0 + for j, current_states in enumerate(states_list): + current_time += flatten_states_list[j][i] + if min_time > current_time: + min_time = current_time + min_pos = i + for i, states in enumerate(states_list): + current_major_axis = states[1] + current_sch_idx = ( + min_pos % (states[2] * aligned_node_shape[current_major_axis]) + ) // states[2] + optimal_record_dict[aligned_node_list[i]] = current_sch_idx + # Pick optimal schedule for dependencies of output nodes + for i in range(len(states_list), len(aligned_node_list)): + multiplier = 1 + for j in range(i + 1, len(aligned_node_list)): + multiplier *= aligned_node_shape[j] + optimal_record_dict[aligned_node_list[i]] = ( + min_pos // multiplier % aligned_node_shape[i] + ) + + # Backward pass to get optimal schedules for other nodes + bfs_q = queue.Queue() + visited = set() + for out_idx in output_idx_list: + bfs_q.put(out_idx) + while not bfs_q.empty(): + node_idx = bfs_q.get() + visited.add(node_idx) + node = self._node_list[node_idx] + if is_boundary_node(node, input_names): + continue + optimal_sch_idx = optimal_record_dict[node_idx] + full_states = self._stage_dict[node_idx].full_states + if 
not has_multiple_inputs(self._node_list, node_idx, input_names, self._opt_out_op): + input_idx = self._in_nodes_dict[node_idx][0] + input_node = self._node_list[input_idx] + if is_boundary_node(input_node, input_names): + continue + if input_idx not in visited: + bfs_q.put(input_idx) + if input_idx not in optimal_record_dict: + dep_list = self._stage_dict[node_idx].dep + dep_idx = tuple([optimal_record_dict[item] for item in dep_list]) + tmp = np.argmin(full_states, axis=1) + optimal_input_sch_idx = tmp[(optimal_sch_idx,) + dep_idx] + optimal_record_dict[input_idx] = optimal_input_sch_idx + else: + input_idx_list = self._in_nodes_dict[node_idx] + optimal_record_dict[input_idx_list[0]] = optimal_sch_idx + full_states_idx = self._stage_dict[node_idx].full_states_idx + tmp = full_states[optimal_sch_idx] + new_states_idx, new_states_pos = [], [] + visited_states_idx, visited_states_pos = [], [] + for i in range(1, len(full_states_idx)): + if full_states_idx[i] in optimal_record_dict: + visited_states_idx.append(full_states_idx[i]) + visited_states_pos.append(i - 1) + else: + new_states_idx.append(full_states_idx[i]) + new_states_pos.append(i - 1) + if visited_states_idx: + tmp = np.transpose(tmp, tuple(visited_states_pos + new_states_pos)) + tmp = tmp[tuple([optimal_record_dict[idx] for idx in visited_states_idx])] + min_pos = np.argmin(tmp) + multiplier = 1 + for i in range(len(new_states_idx)): + multiplier *= full_states.shape[new_states_pos[i] + 1] + for pos, idx in zip(new_states_pos, new_states_idx): + multiplier //= full_states.shape[pos + 1] + optimal_record_dict[idx] = min_pos // multiplier + min_pos %= multiplier + for input_idx in input_idx_list: + if input_idx not in visited: + bfs_q.put(input_idx) + + self._optimal_record_dict = optimal_record_dict + for node_idx, _ in self._in_nodes_dict.items(): + if self._node_list[node_idx]["op"] not in self._target_ops: + continue + self._logger.info("Finished backward pass...") + + def run(self, **kwargs): + """Run 
dynamic programming solver.""" + max_num_states = None if "max_num_states" not in kwargs else kwargs["max_num_states"] + self._num_states = 0 + self._max_num_states = max_num_states + self._logger.info("Start to run dynamic programming algorithm...") + self._forward() + self._backward() + self._logger.info("Finished DPExecutor run.") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/pbqp_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/pbqp_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..c02cb2a5adcfd44cb70dfb95c10dc5d058152eb4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/pbqp_tuner.py @@ -0,0 +1,288 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name, too-many-locals, unnecessary-list-index-lookup +"""Partitioned Boolean Quadratic Programming Tuner""" +from ._base import INVALID_LAYOUT_TIME +from .base_graph_tuner import BaseGraphTuner +from .utils import is_boundary_node, has_multiple_inputs + + +class PBQPTuner(BaseGraphTuner): + """An approximation method to deal with intractably + large size of graph tuning problem. + + This graph coloring algorithm mainly comes from: + + Lang Hames and Bernhard Scholz. + Nearly optimal register allocation with pbqp.JMLC 2006. + LNCS, vol.4228,pp. 346-361, 2016 + """ + + def __init__(self, *args, **kwargs): + """Create a partitioned boolean quadratic programming tuner.""" + super(PBQPTuner, self).__init__(*args, **kwargs) + + # Remove input and ruled_out nodes + input_names = self._input_shapes.keys() + for node_idx in self._out_nodes_dict: + node = self._node_list[node_idx] + if is_boundary_node(node, input_names): + for out_node_idx in self._out_nodes_dict[node_idx]: + self._in_nodes_dict[out_node_idx].remove(node_idx) + + self._adj_dict = {} + for node_idx in self._in_nodes_dict: + self._adj_dict[node_idx] = list(self._in_nodes_dict[node_idx]) + list( + self._out_nodes_dict[node_idx] + ) + + self._record_cost_dict = {} + for key in self._in_nodes_dict: + self._record_cost_dict[key] = [] + for record in self._node_list[key]["record_candidates"]: + self._record_cost_dict[key].append(record[1].costs[0]) + + self._max_degree = -1 + self._node_degree_dict = {} + for node_idx in self._in_nodes_dict: + node_degree = self._get_degree(node_idx) + self._node_degree_dict[node_idx] = node_degree + self._max_degree = max(self._max_degree, node_degree) + + self._stack = [] + self._buckets = [[] for _ in range(self._max_degree + 2)] + for node_idx in sorted(self._in_nodes_dict): + node_degree = self._get_degree(node_idx) + self._buckets[node_degree].append(node_idx) + + self._is_optimal = True + + def _get_degree(self, node_idx): + """Get node degree.""" 
+ return len(self._adj_dict[node_idx]) + + def _reorder_adj_nodes(self, node_idx): + """Update buckets list with current adjacency list.""" + for adj_node in self._adj_dict[node_idx]: + current_degree = self._get_degree(adj_node) + prev_degree = self._node_degree_dict[adj_node] + if prev_degree != current_degree: + self._buckets[prev_degree].remove(adj_node) + self._buckets[current_degree].insert(0, adj_node) + self._node_degree_dict[adj_node] = current_degree + + def _remove_node(self, node_idx): + """Remove node from graph. Update adjacency list accordingly.""" + node_degree = self._get_degree(node_idx) + self._buckets[node_degree].remove(node_idx) + for adj_node in self._adj_dict[node_idx]: + self._adj_dict[adj_node].remove(node_idx) + + def _insert_edge(self, node_x, node_y, adj_cost_matrix): + """Insert an edge between two nodes.""" + self._layout_transform_interlayer_cost[(node_x, node_y)] = adj_cost_matrix + self._layout_transform_interlayer_cost[(node_y, node_x)] = [] + for i in range(len(adj_cost_matrix[0])): + self._layout_transform_interlayer_cost[(node_y, node_x)].append([]) + for cost_vec in adj_cost_matrix: + self._layout_transform_interlayer_cost[(node_y, node_x)][i].append(cost_vec[i]) + + self._adj_dict[node_x].append(node_y) + self._adj_dict[node_y].append(node_x) + + def _backward_insert_node(self, node_idx): + """Reinsert node in backward pass.""" + for adj_node in self._adj_dict[node_idx]: + self._adj_dict[adj_node].append(node_idx) + + def _RI_reduction(self, node_idx): + """Reduce nodes with degree 1.""" + adj_node = self._adj_dict[node_idx][0] + ltf_matrix = self._layout_transform_interlayer_cost[(adj_node, node_idx)] + for i, cost_vec in enumerate(ltf_matrix): + min_cost = INVALID_LAYOUT_TIME + for j, cost in enumerate(cost_vec): + min_cost = min(min_cost, cost + self._record_cost_dict[node_idx][j]) + self._record_cost_dict[adj_node][i] += min_cost + self._remove_node(node_idx) + self._reorder_adj_nodes(node_idx) + 
self._stack.append(node_idx) + + def _RII_reduction(self, node_idx): + """Reduce nodes with degree 2.""" + adj_node_x, adj_node_y = self._adj_dict[node_idx] + ltf_matrix_x = self._layout_transform_interlayer_cost[(adj_node_x, node_idx)] + ltf_matrix_y = self._layout_transform_interlayer_cost[(adj_node_y, node_idx)] + delta_matrix = [[] for _ in range(len(ltf_matrix_x))] + for i, cost_vec_x in enumerate(ltf_matrix_x): + for j, cost_vec_y in enumerate(ltf_matrix_y): + min_cost = INVALID_LAYOUT_TIME + for k in range(len(self._record_cost_dict[node_idx])): + min_cost = min( + min_cost, + cost_vec_x[k] + cost_vec_y[k] + self._record_cost_dict[node_idx][k], + ) + delta_matrix[i].append(min_cost) + + if adj_node_x == adj_node_y: + for i, delta_row in enumerate(delta_matrix): + self._record_cost_dict[adj_node_x][i] += delta_row[i] + elif adj_node_x in self._adj_dict[adj_node_y]: + for i, _ in enumerate(delta_matrix): + for j, delta in enumerate(delta_matrix[i]): + self._layout_transform_interlayer_cost[(adj_node_x, adj_node_y)][i][j] += delta + self._layout_transform_interlayer_cost[(adj_node_y, adj_node_x)][j][i] += delta + else: + self._insert_edge(adj_node_x, adj_node_y, delta_matrix) + + self._remove_node(node_idx) + self._reorder_adj_nodes(node_idx) + self._stack.append(node_idx) + + def _RN_reduction(self, node_idx): + """Reduce nodes with degree greater than 2.""" + min_cost = INVALID_LAYOUT_TIME + record_idx = -1 + + for i, record_cost in enumerate(self._record_cost_dict[node_idx]): + current_cost = record_cost + for adj_node in self._adj_dict[node_idx]: + ltf_matrix = self._layout_transform_interlayer_cost[(node_idx, adj_node)] + adj_record_cost = list(self._record_cost_dict[adj_node]) + for j, ltf_cost in enumerate(ltf_matrix[i]): + adj_record_cost[j] += ltf_cost + current_cost += min(adj_record_cost) + if current_cost < min_cost: + min_cost = current_cost + record_idx = i + + if record_idx < 0: + raise RuntimeError( + f"Can't find a soltuion for node {node_idx} 
when applying RN reduction" + ) + self._optimal_record_dict[node_idx] = record_idx + self._is_optimal = False + + for adj_node in self._adj_dict[node_idx]: + ltf_matrix = self._layout_transform_interlayer_cost[(node_idx, adj_node)] + for i, ltf_cost in enumerate(ltf_matrix[record_idx]): + self._record_cost_dict[adj_node][i] += ltf_cost + + self._remove_node(node_idx) + self._reorder_adj_nodes(node_idx) + self._stack.append(node_idx) + + def _forward(self): + """Forward pass in PBQP to reduce nodes.""" + while True: + if self._buckets[1]: + node_idx = self._buckets[1][0] + self._RI_reduction(node_idx) + elif self._max_degree >= 2 and self._buckets[2]: + node_idx = self._buckets[2][0] + self._RII_reduction(node_idx) + elif self._max_degree >= 3: + max_degree_node = -1 + for i in range(self._max_degree, 2, -1): + if self._buckets[i]: + max_degree_node = self._buckets[i][0] + self._RN_reduction(max_degree_node) + break + if max_degree_node < 0: + break + else: + break + + def _backward(self): + """Backward pass in PBQP to generate optimal solution.""" + # Solve nodes left in the forward graph + for node_idx in self._buckets[0]: + record_costs = self._record_cost_dict[node_idx] + min_cost = min(record_costs) + self._optimal_record_dict[node_idx] = record_costs.index(min_cost) + + # Solve nodes with one or two degrees + for node_idx in reversed(self._stack): + self._backward_insert_node(node_idx) + if node_idx not in self._optimal_record_dict: + record_costs = list(self._record_cost_dict[node_idx]) + for adj_node in self._adj_dict[node_idx]: + adj_optimal_idx = self._optimal_record_dict[adj_node] + for i, _ in enumerate(record_costs): + record_costs[i] += self._layout_transform_interlayer_cost[ + (node_idx, adj_node) + ][i][adj_optimal_idx] + min_cost = min(record_costs) + self._optimal_record_dict[node_idx] = record_costs.index(min_cost) + + def run(self, **kwargs): + """Run partitioned boolean quadratic programming tuner.""" + self._logger.info("Start to run PBQP 
algorithm...") + # Define virtual record lists and layout transformaton matrices + # for multi-input nodes. + input_names = self._input_shapes.keys() + temp = {} + for key, val in self._in_nodes_dict.items(): + target_input_idx = -1 + target_input_pos = -1 + if has_multiple_inputs(self._node_list, key, input_names, self._opt_out_op): + for i, item in enumerate(val): + node = self._node_list[item] + if not is_boundary_node(node, input_names): + target_input_idx = item + target_input_pos = i + break + + # Skip boundary operator + if target_input_idx < 0: + continue + + temp[(target_input_idx, key)] = [] + record_candidates = self._node_list[target_input_idx]["record_candidates"] + for j in range(len(record_candidates)): + temp[(target_input_idx, key)].append([]) + for k in range(len(record_candidates)): + temp[(target_input_idx, key)][j].append( + 0 if j == k else INVALID_LAYOUT_TIME + ) + + for j in range(target_input_pos + 1, len(val)): + input_idx = val[j] + input_node = self._node_list[input_idx] + if is_boundary_node(input_node, input_names): + continue + temp[(input_idx, key)] = self._layout_transform_interlayer_cost[ + (input_idx, target_input_idx) + ] + self._layout_transform_interlayer_cost.update(temp) + + # Create reverse layout transformation matrices + temp = {} + for idx_pair, ltf_matrix in self._layout_transform_interlayer_cost.items(): + reverse_key = (idx_pair[1], idx_pair[0]) + reverse_matrix = [[] for _ in range(len(ltf_matrix[0]))] + for i, _ in enumerate(ltf_matrix): + for j, ltf in enumerate(ltf_matrix[i]): + reverse_matrix[j].append(ltf) + temp[reverse_key] = reverse_matrix + self._layout_transform_interlayer_cost.update(temp) + + self._forward() + self._backward() + is_optimal = "optimal" if self._is_optimal else "sub-optimal" + msg = f"Finished PBQPExecutor run. Got {is_optimal} solution." 
+ self._logger.info(msg) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..21a16b8dcab11c14c9bf07f2dae35da7c9fd02a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__init__.py @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=wildcard-import +"""Graph tuner utility functions""" +from __future__ import absolute_import + +from . import traverse_graph +from . 
import utils + +from .traverse_graph import expr2graph, get_direct_ancestor, get_in_nodes, get_out_nodes +from .utils import has_multiple_inputs, is_boundary_node, bind_inputs diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..433a4a202760a002901b68208583f680db923f8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/traverse_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/traverse_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfd43a0b67203d1f58bd42cd62b29d595d2de5a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/traverse_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b20edbe9803c5307aebfffdd608d05fc2fdb5700 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..0c1ce36ba941fb1171a452135937e79a51d151e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py @@ -0,0 +1,334 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access +"""API for graph traversing.""" +import threading +import re + +import tvm +from tvm import relay, autotvm +from tvm.relay import transform +from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple +from tvm.relay.function import Function +from tvm.relay.ty import TupleType, TensorType +from tvm.autotvm.task import TaskExtractEnv + +from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node +from .._base import OPT_OUT_OP + + +def expr2graph(expr, target_ops, node_dict, node_list, tvm_target): + """Convert relay expr to graph data structure + and fetch workloads of target operators. + + Parameters + ---------- + expr : tvm.relay.Expr.Function + Input relay function expression. 
+ + target_ops: List of tvm.ir.Op + List of target relay ops + + node_dict : dictionary from tvm.relay.Expr to int + Dictionary to record node index + + node_list : list of dictionary + List of nodes which contains all expr in the input relay function. + Each node will be stored as a dictionary in the format of + {"op": str, "node": tvm.relay.expr, "inputs": [int], "types": [tvm.relay.Type], + "name": str, "workloads": [tuple], "topi_op": [function]} + + tvm_target : tvm.target + The TVM target object. + """ + # TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact + # that # autotvm tasks == # ops. But this won't be true after having relay op + # strategy. We need to find a solution to fix this. + env = TaskExtractEnv.get(allow_duplicate=True) + env.reset(target_ops) + # pylint: disable=not-context-manager + with env: + _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target) + task_pos = 0 + for node_entry in node_list: + if node_entry["op"] in target_ops: + task_name, args = env.task_collection[task_pos] + task = autotvm.task.create(task_name, args, target=tvm_target) + node_entry["workloads"] = [task.workload] + node_entry["topi_op"] = [task_name] + task_pos += 1 + + +def _infer_type(node): + """A method to infer the type of a relay expression.""" + mod = tvm.IRModule.from_expr(node) + mod = transform.InferType()(mod) + entry = mod["main"] + return entry if isinstance(node, relay.Function) else entry.body + + +def _replace_device_with_tracing(target): + """This is to replace -device=XXX with -device=tracing in the tvm_target string. + It is a stand-along function for testability. 
+ We need to have device=tracing in order to fetch the workloads, it is not used + for anything beyond that so it is safe to override the device here only.""" + target = str(target) + if "-device" in target: + return re.sub("-device=[^\\-$]+", "-device=tracing ", target).strip(" ") + return target + " -device=tracing" + + +def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target): + """Implementation to convert relay expr to graph data structure""" + + def _traverse_expr(node): + if node in node_dict: + return + node_index = len(node_list) + node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None} + + if isinstance(node, Call): + op = node.op + node_entry["op"] = node.op + for arg in node.args: + in_node_idx = node_dict[arg] + if isinstance(arg, (Tuple, TupleGetItem)): + node_entry["inputs"] += node_list[in_node_idx]["inputs"] + else: + node_entry["inputs"].append([in_node_idx, 0, 0]) + infer_out = _infer_type(node) + out_type = infer_out._checked_type_ + if isinstance(out_type, TensorType): + node_entry["types"].append(out_type) + elif isinstance(out_type, TupleType): + for tupe_type in out_type.fields: + node_entry["types"].append(tupe_type) + else: + raise RuntimeError( + f"Unsupported output type {type(out_type)} in operator {op.name}" + ) + + # Utilize tracing target to fetch workload with topo-order. + # Since we only need workload, dummy target can be used to + # create task. + if op in target_ops: + params = [] + for i, input_idx in enumerate(node_entry["inputs"]): + input_node_entry = node_list[input_idx[0]] + input_type = input_node_entry["types"][input_idx[1]] + if not isinstance(input_node_entry["node"], (Var, Constant, Call)): + raise RuntimeError( + "Graph tuner can only tune target " + "operators with input node of type " + "relay.expr.Var/Constant/Call. 
Now " + "find a target op %s with input type %s" + % (op, str(type(input_node_entry["node"]))) + ) + free_var = relay.Var(f"var_{i}", input_type) + params.append(free_var) + call = relay.Call(node.op, params, node.attrs) + mod = tvm.IRModule.from_expr(relay.Function(params, call)) + relay.backend.te_compiler.get().clear() + tracing_target = _replace_device_with_tracing(tvm_target) + build_thread = threading.Thread(target=relay.build, args=(mod, tracing_target)) + build_thread.start() + build_thread.join() + elif isinstance(node, Var): + node_entry["name"] = node.name_hint + node_entry["types"] = [node.type_annotation] + elif isinstance(node, Function): + # Ignore root node since it equals to input function expression + if node != expr: + _expr2graph_impl(node, target_ops, node_dict, node_list, tvm_target) + return + elif isinstance(node, TupleGetItem): + in_node_idx = node_dict[node.tuple_value] + node_entry["inputs"].append([in_node_idx, node.index, 0]) + elif isinstance(node, Tuple): + for tuple_item in node: + in_node_idx = node_dict[tuple_item] + if isinstance(tuple_item, TupleGetItem): + node_entry["inputs"] += node_list[in_node_idx]["inputs"] + elif isinstance(tuple_item, Tuple): + raise RuntimeError("Graph tuner doesn't support nested tuple.") + else: + node_entry["inputs"].append([in_node_idx, 0, 0]) + elif isinstance(node, Constant): + node_entry["name"] = "Constant_" + str(node_index) + node_entry["types"] = [node.checked_type] + elif isinstance(node, tvm.ir.Op): + return + else: + raise RuntimeError(f"Not supported relay node type in graph tuning: {type(node)}") + node_dict[node] = node_index + node_list.append(node_entry) + + relay.analysis.post_order_visit(expr, _traverse_expr) + + +def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names): + """Given a node_list in relay function and a node index, return the + closest ancestor which has op_name as operator name or is multi_input operator. 
+ + If node has multiple inputs, multiple ancestor nodes will be returned. + + Parameters + ---------- + node_list : list of dict of str to object + List of all nodes in a graph. + + visited_dict : dict of int to int + Nodes and corresponding ancestors which have been visited. + + target_ops: List of str + List of target relay base op name + + node_idx : int + Input node index. + + input_names : list of str + Names of graph input nodes. + + Returns + ------- + out : list of int + List of ancestor node index. + """ + if node_idx in visited_dict: + return visited_dict[node_idx] + node = node_list[node_idx] + if is_boundary_node(node, input_names): + return [node_idx] + + node_direct_ancestor = [] + for item_idx in node["inputs"]: + item = node_list[item_idx[0]] + is_multiple_inputs = has_multiple_inputs(node_list, item_idx[0], input_names, OPT_OUT_OP) + if item["op"] in target_ops or is_multiple_inputs: + node_direct_ancestor.append(item_idx[0]) + else: + tmp = get_direct_ancestor(node_list, visited_dict, target_ops, item_idx[0], input_names) + for tmp_item in tmp: + if tmp_item not in node_direct_ancestor: + node_direct_ancestor.append(tmp_item) + visited_dict[node_idx] = node_direct_ancestor + return node_direct_ancestor + + +def get_in_nodes(node_list, target_ops, input_names): + """Create a dictionary mapping from op_name nodes or multi-input + nodes to closest input ancestors. + + Parameters + ---------- + node_list : list of dict of str to object + List of all nodes in a graph. + + target_ops: List of str + List of target relay op + + input_names : list of str + Names of graph input nodes. + + Returns + ------- + out : dict of int to list of int + Dictionary maps node index to closest input ancestors. 
+ """ + + visited_dict = {} + in_node_dict = {} + for i, node in enumerate(node_list): + if is_boundary_node(node, input_names) or is_skipped_node(node): + continue + get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names) + for key, val in visited_dict.items(): + node = node_list[key] + is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP) + if node["op"] in target_ops or is_multiple_inputs: + in_node_dict[key] = val + + # Reduce boundary nodes + out_node_dict = get_out_nodes(in_node_dict) + has_reduced_node = True + while has_reduced_node: + boundary_nodes = [] + for key, val in in_node_dict.items(): + node = node_list[key] + is_boundary = True + # Target ops can't be boundary nodes + if node["op"] not in target_ops: + for input_idx in val: + in_node = node_list[input_idx] + if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict: + is_boundary = False + else: + val.remove(input_idx) + if is_boundary: + boundary_nodes.append(key) + if boundary_nodes: + for idx in boundary_nodes: + if idx in in_node_dict: + del in_node_dict[idx] + else: + has_reduced_node = False + + # Remove empty nodes to ignore pre-computed sub-graph + has_empty_node = True + while has_empty_node: + empty_nodes = [] + for key, val in in_node_dict.items(): + if not val: + empty_nodes.append(key) + if empty_nodes: + has_empty_node = True + for node in empty_nodes: + del in_node_dict[node] + if node in out_node_dict: + for out_node in out_node_dict[node]: + in_node_dict[out_node].remove(node) + else: + has_empty_node = False + + return in_node_dict + + +def get_out_nodes(in_node_dict): + """Create output dictionary from input dictionary. + + Parameters + ---------- + in_node_dict : dict of int to list of int + Dictionary maps node index to closest input ancestors. + It can be created with get_in_nodes. + + Returns + ------- + out : dict of int to list of int + Dictionary maps node index to closest output nodes. 
+ """ + out_node_dict = {} + for key in in_node_dict: + out_node_dict[key] = [] + for key, val in in_node_dict.items(): + for item in val: + if item in out_node_dict: + out_node_dict[item].append(key) + else: + out_node_dict[item] = [key] + + return out_node_dict diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/utils.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54e0d1cb36b2afa1c88a10cf20327bad89c88f37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/graph_tuner/utils/utils.py @@ -0,0 +1,158 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=eval-used,invalid-name,too-many-arguments +"""Utility functions""" +import tvm +from tvm import relay +from tvm.relay import transform + + +def has_multiple_inputs(node_list, node_idx, input_names, opt_out_op): + """Check whether a node has multiple input nodes + except variable nodes. + + Parameters + ---------- + node_list : list of dict of str to object + List of all nodes in a graph. + + node_idx : int + Node index to be checked. 
+ + input_names : list of str + List of input names of graph. + + Returns + ------- + out : bool + Whether the specified node has multiple input nodes + """ + num_inputs = 0 + node = node_list[node_idx] + for in_idx in node["inputs"]: + in_idx = in_idx[0] + in_node = node_list[in_idx] + # Exclude parameter nodes + if in_node["op"] is not None and in_node["op"].name in opt_out_op: + increase = False + for t_idx in in_node["inputs"]: + increase = has_multiple_inputs(node_list, t_idx[0], input_names, opt_out_op) + if increase: + num_inputs += 1 + elif in_node["op"] is not None or ("name" in in_node and in_node["name"] in input_names): + num_inputs += 1 + return num_inputs > 1 + + +def is_boundary_node(node_entry, input_names): + """Whether a node is a boundary node. + Currently input node and nodes in LAYOUT_FIXED_OP are + counted as boundary. + + Parameters + ---------- + node_entry : dict + Node entry. + + input_names : list of str + List of input names of graph. + + Returns + ------- + out : bool + whether node is a boundary node. + """ + # Operators dependent on original layouts. + _LAYOUT_FIXED_OP = [ + relay.op.get(name) + for name in ( + "nn.batch_flatten", + "transpose", + "reshape", + "vision.multibox_prior", + "vision.multibox_transform_loc", + "where", + "vision.non_max_suppression", + "strided_slice", + ) + ] + + out = node_entry["op"] in _LAYOUT_FIXED_OP or ( + "name" in node_entry and node_entry["name"] in input_names + ) + return out + + +def is_skipped_node(node_entry): + """Whether a node is not counted. + + Parameters + ---------- + node_entry : dict + Node entry. + + Returns + ------- + out : bool + whether node is skipped. + """ + # Operators not counted in graph tuner. + return isinstance(node_entry["node"], relay.Tuple) + + +def bind_inputs(expr, input_shapes=None, input_dtypes="float32"): + """Bind input variables of a relay function expression + to new shapes and/or dtypes. 
+ + Parameters + ---------- + expr : tvm.relay.Expr.Function + Input relay function expression. + + input_shapes : dict of str to tuple of int, optional + Input shapes. + + input_dtypes : str or dict of str to str, optional + Input dtypes. + + Returns + ------- + out : tvm.relay.Expr.Function + Bind relay function expression. + """ + if input_shapes is None: + return expr + if isinstance(input_dtypes, str): + input_dtypes = {key: input_dtypes for key in input_shapes.keys()} + + updated_input_dict = {} + for input_name in input_shapes.keys(): + updated_input = relay.var( + input_name, shape=input_shapes[input_name], dtype=input_dtypes[input_name] + ) + updated_input_dict[input_name] = updated_input + + rebind_dict = {} + for var in expr.params: + if var.name_hint in updated_input_dict: + rebind_dict[var] = updated_input_dict[var.name_hint] + updated_expr = relay.expr.bind(expr, rebind_dict) + + mod = tvm.IRModule.from_expr(updated_expr) + mod = transform.InferType()(mod) + entry = mod["main"] + return entry if isinstance(updated_expr, relay.Function) else entry.body diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10b0843402ea7f1067bfc3e5f498c70bc7e17265 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__init__.py @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Distributed executor infrastructure to scale up the tuning""" + +from .measure import ( + MeasureInput, + MeasureResult, + MeasureErrorNo, + measure_option, + create_measure_batch, +) +from .measure_methods import ( + LocalBuilder, + LocalRunner, + RPCRunner, + default_module_loader, + request_remote, +) +from .executor import Executor diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a812d96cdc16ac95bcec21ab9aedd3ed33d72098 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcd8154a09071b28beec5377c273047c163d0ac2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/executor.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ebf7b3a511efb5936988f3ca308875beefaad19 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/executor.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/executor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dc765959cba96ffde1dc03f0d66ca5e191e0acb Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/executor.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..712b347276d3a1d439a406720276e3930cd5da13 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17f97ed5f0260129ab1565bdd4ce47b6cc1d3d2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure.cpython-38.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4db33af876261d133af7ca60c26d3d00a23de1fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure_methods.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure_methods.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66a28159f2adfd90422503b516474b1b2d185618 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/__pycache__/measure_methods.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/executor.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..f8eca7298c644e4e8c2ed6d33c1f4b81508a3990 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/executor.py @@ -0,0 +1,102 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" Abstraction for asynchronous job execution """ + + +class Executor(object): + """ + Base abstract executor interface for asynchronous job submission. + Allows submit asynchronous jobs and returns the Future object. + """ + + # timeout for jobs that may hang + DEFAULT_TIMEOUT = 120 + + def submit(self, func, *args, **kwargs): + """ + Pass task (function, arguments) to the Executor. + + Parameters + ---------- + func : callable + function to be run by a worker + args : list or tuple, optional + arguments passed to the function + kwargs : dict, optional + The keyword arguments + + Returns + ------- + future : Future + Future object wrapping the task which can be used to + collect the task's result. + """ + raise NotImplementedError() + + +class Future(object): + """ + Base class of the future object. + The implementations can return object of subclass of this. + This objects encapsulates the asynchronous execution of task + submitted to another thread, or another worker for execution. + + Future objects store the state of tasks--can be polled for + result or a blocking call to retrieve the result can be used. + """ + + def done(self): + """ + Return True if job was successfully cancelled or finished running. + """ + raise NotImplementedError() + + def get(self, timeout=None): + """ + Get the result. This will block until the result is available. + + Parameters + ---------- + timeout : int or float, optional + Maximum number of seconds to wait before it timeouts. + If not specified, it means we block until the result is available. 
+ + Returns + ------- + result : Any + The result returned by the submitted function. + + Raises + ------ + TimeoutError : if the result call timeouts. + """ + raise NotImplementedError() + + +class FutureError(RuntimeError): + """Base error class of all future events""" + + +# pylint:disable=redefined-builtin +class TimeoutError(FutureError): + """Error raised when a task is timeout.""" + + +class ExecutionError(FutureError): + """ + Error raised when future execution crashes or failed. + """ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/measure.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/measure.py new file mode 100644 index 0000000000000000000000000000000000000000..c9b82cd81c54012bc40c854d812f7fa1745a51a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/measure.py @@ -0,0 +1,296 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=pointless-string-statement,consider-using-enumerate,invalid-name +"""User facing API for specifying how to measure the generated code""" +import enum +import multiprocessing +from collections import namedtuple + + +class MeasureInput(namedtuple("MeasureInput", ["target", "task", "config"])): + """ + Stores all the necessary inputs for a measurement. + + Parameters + ---------- + target : tvm.target.Target + The target device + task : task.Task + Task function + config : ConfigEntity + Specific configuration. + """ + + +class MeasureResult(namedtuple("MeasureResult", ["costs", "error_no", "all_cost", "timestamp"])): + """ + Stores all the results of a measurement + + Parameters + ---------- + costs: Array of float or Array of Exception + If no error occurs during measurement, it is an array of measured running times. + If an error occurs during measurement, it is an array of the exception objections. + error_no: int + Denote error type, defined by MeasureErrorNo + all_cost: float + All cost of this measure, including rpc, compilation, test runs + timestamp: float + The absolute time stamp when we finish measurement. + """ + + def __repr__(self): + error_no_str = ( + str(MeasureErrorNo(self.error_no)) + if isinstance(self.error_no, (MeasureErrorNo, int)) + else str(self.error_no) + ) + return ( + f"{self.__class__.__name__}(costs={self.costs!r}, error_no={error_no_str}, " + f"all_cost={self.all_cost}, timestamp={self.timestamp!r})" + ) + + +class MeasureErrorNo(enum.IntEnum): + """Error type for MeasureResult""" + + NO_ERROR = 0 # no error + INSTANTIATION_ERROR = 1 # actively detected error in instantiating a template with a config + COMPILE_HOST = 2 # error when compiling code on host (e.g. tvm.build) + COMPILE_DEVICE = 3 # error when compiling code on device (e.g. 
OpenCL JIT on the device) + RUNTIME_DEVICE = 4 # error when run program on device + WRONG_ANSWER = 5 # answer is wrong when compared to a golden output + BUILD_TIMEOUT = 6 # timeout during compilation + RUN_TIMEOUT = 7 # timeout during run + UNKNOWN_ERROR = 8 # unknown error + + +class Builder(object): + """Builder that builds programs in tuning + + Parameters + ---------- + timeout: float, optional + The timeout of a build task + n_parallel: int, optional + The number of tasks submitted in parallel + By default it will use all cpu cores + build_kwargs: dict, optional + Keyword args given to the build function. + """ + + def __init__(self, timeout=10, n_parallel=None, build_kwargs=None): + self.timeout = timeout + self.n_parallel = n_parallel or multiprocessing.cpu_count() + self.user_build_kwargs = build_kwargs if build_kwargs is not None else {} + self.runner_build_kwargs = None + self.task = None + + def set_task(self, task, build_kwargs=None): + """ + Initialize for a new tuning task + + Parameters + ---------- + task: Task + The tuning task + build_kwargs: dict, optional + The additional kwargs for build function + """ + self.task = task + self.build_kwargs = dict(build_kwargs.items()) if build_kwargs is not None else {} + if any(k in self.build_kwargs for k in self.user_build_kwargs): + logging.warn( + "Overriding these runner-supplied kwargs with user-supplied:\n%s", + "\n".join( + f" * {k}: from {build_kwargs[k]!r} to {self.user_build_kwargs[k]!r}" + for k in sorted([k for k in build_kwargs if k in self.user_build_kwargs]) + ), + ) + for k, v in self.user_build_kwargs.items(): + self.build_kwargs[k] = v + + def build(self, measure_inputs): + """Build programs + + Parameters + ---------- + measure_inputs: List of MeasureInput + The measure input + + Returns + ------- + build_results: List of BuildResult + The build result. 
+ """ + raise NotImplementedError() + + +class Runner(object): + """Runner that runs and measures the time cost of a generated program in tuning + + Parameters + ---------- + timeout: float, optional + The timeout of a build task + n_parallel: int, optional + The number of tasks submitted in parallel + By default it will use all cpu cores + """ + + def __init__(self, timeout=5, n_parallel=None): + self.timeout = timeout + self.n_parallel = n_parallel or multiprocessing.cpu_count() + self.task = None + + def set_task(self, task): + """ + Initialize for a new tuning task + + Parameters + ---------- + task: Task + The tuning task + """ + self.task = task + + def get_build_kwargs(self): + """ + Get device specific build arguments (e.g. maximum shared memory size) + + Returns + ---------- + kwargs: dict + The additional keyword arguments + """ + raise NotImplementedError() + + def run(self, measure_inputs, build_results): + """Run amd measure built programs + + Parameters + ---------- + measure_inputs: List of MeasureInput + The raw measure input + build_results: List of BuildResults + The build results + + Returns + ------- + measure_results: List of MeasureResult + The final results of measurement + """ + raise NotImplementedError() + + +def measure_option(builder, runner): + """ + Set options for measure. To measure a config, we will build it and run it. + So we have to set options for these two steps. + They have their own options on timeout, parallel, etc. 
+ + Parameters + ---------- + builder: Builder + Specify how to build programs + runner: Runner + Specify how to run programs + + Examples + -------- + # example setting for using local devices + >>> measure_option = autotvm.measure_option( + >>> builder=autotvm.LocalBuilder(), # use all local cpu cores for compilation + >>> runner=autotvm.LocalRunner( # measure them sequentially + >>> number=10, + >>> timeout=5) + >>> ) + + # example setting for using remote devices + >>> measure_option = autotvm.measure_option( + >>> builder=autotvm.LocalBuilder(), # use all local cpu cores for compilation + >>> runner=autotvm.RPCRunner( + >>> 'rasp3b', 'locahost', 9190, # device key, host and port of the rpc tracker + >>> number=4, + >>> timeout=4) # timeout of a run on the device. RPC request waiting time is excluded. + >>>) + + Note + ---- + To make measurement results accurate, you should pick the correct value for the argument + `number` and `repeat` in Runner(). Some devices need a certain minimum running time to + "warm up," such as GPUs that need time to reach a performance power state. + Using `min_repeat_ms` can dynamically adjusts `number`, so it is recommended. + The typical value for NVIDIA GPU is 150 ms. + """ + # pylint: disable=import-outside-toplevel + from .measure_methods import LocalBuilder, LocalRunner + + if isinstance(builder, str): + if builder == "local": + builder = LocalBuilder() + else: + raise ValueError("Invalid builder: " + builder) + + if isinstance(runner, str): + if runner == "local": + runner = LocalRunner() + else: + raise ValueError("Invalid runner: " + runner) + + opt = { + "builder": builder, + "runner": runner, + } + + return opt + + +def create_measure_batch(task, option): + """Get a standard measure_batch function. + + Parameters + ---------- + task: tvm.autotvm.task.Task + The tuning task + option: dict + The option for measuring generated code. + You should use the return value of function :any:`measure_option` for this argument. 
+ + Returns + ------- + measure_batch: callable + a callback function to measure a batch of configs + """ + builder = option["builder"] + runner = option["runner"] + + attach_objects = runner.set_task(task) + + # feed device related information from runner to builder + # (e.g. max shared memory for validity checking) + build_kwargs = runner.get_build_kwargs() + builder.set_task(task, build_kwargs) + + def measure_batch(measure_inputs): + build_results = builder.build(measure_inputs) + results = runner.run(measure_inputs, build_results) + return results + + measure_batch.n_parallel = builder.n_parallel + measure_batch.attach_objects = attach_objects + return measure_batch diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/measure_methods.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/measure_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..c57ebfc88bd25a7fb71cc11dc69fc5e1d5d28ac2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/measure/measure_methods.py @@ -0,0 +1,863 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks +""" +Functions that run on executor for measurement. + +These functions are responsible for building the tvm module, uploading it to +remote devices, recording the running time costs, and checking the correctness of the output. +""" + +import contextlib +import logging +import os +import shutil +import tempfile +import threading +import time +import traceback +import typing +import warnings +from collections import namedtuple +from random import getrandbits + +import tvm._ffi +import tvm.ir.transform +from tvm import nd +from tvm import rpc as _rpc +from tvm.autotvm.env import AutotvmGlobalScope, reset_global_scope +from tvm.contrib import ndk, stackvm, tar +from tvm.contrib.popen_pool import PopenPoolExecutor +from tvm.driver import build +from tvm.error import TVMError +from tvm.target import Target + +from ..env import AutotvmGlobalScope +from ..task.space import InstantiationError +from ..utils import get_const_tuple +from .measure import Builder, MeasureErrorNo, MeasureResult, Runner + +logger = logging.getLogger("autotvm") + + +class BuildResult(namedtuple("BuildResult", ("filename", "arg_info", "error", "time_cost"))): + """ + Stores all the necessary inputs for a measurement. + + Parameters + ---------- + filename : str + The filename of generated library + arg_info : Tuple + The shape and dtype information of tvm tensor arguments + error : Exception + The error happens during compilation. + time_cost : float + The time cost of building + """ + + +class LocalBuilder(Builder): + """Run compilation on local machine + + Parameters + ---------- + timeout: float + The timeout of a compilation + n_parallel: int + The number of tasks run in parallel. "None" will use all cpu cores + build_kwargs: dict + If supplied, additional kwargs passed to build_func. Overrides any build_kwargs supplied + by the Runner. 
+ build_func: callable or str + If is 'default', use default build function + If is 'ndk', use function for android ndk + If id 'stackvm', use function for stackvm + If is callable, use it as custom build function, expect lib_format field. + do_fork: bool + If False, do not fork when building. Requires n_parallel=1. + runtime: Optional[Runtime] + Specify the runtime to generate artifacts for + """ + + def __init__( + self, + timeout=10, + n_parallel=None, + build_kwargs=None, + build_func="default", + do_fork=False, + runtime=None, + ): + super(LocalBuilder, self).__init__(timeout, n_parallel, build_kwargs) + + if isinstance(build_func, str): + if build_func == "default": + build_func = tar.tar + elif build_func == "ndk": + build_func = ndk.create_shared + elif build_func == "stackvm": + build_func = stackvm.build + else: + raise ValueError("Invalid build_func" + build_func) + self.build_func = _WrappedBuildFunc(build_func, runtime) + if not do_fork: + assert n_parallel in ( + None, + 1, + ), f"if do_fork=False, need n_parallel=None or 1; got {n_parallel}" + self.executor = PopenPoolExecutor( + timeout=timeout, initializer=reset_global_scope, initargs=(AutotvmGlobalScope.current,) + ) + self.tmp_dir = tempfile.mkdtemp() + + def build(self, measure_inputs): + results = [] + + shutil.rmtree(self.tmp_dir, ignore_errors=True) + self.tmp_dir = tempfile.mkdtemp() + + for i in range(0, len(measure_inputs), self.n_parallel): + futures = [] + for inp in measure_inputs[i : i + self.n_parallel]: + ret = self.executor.submit(self.build_func, inp, self.tmp_dir, **self.build_kwargs) + futures.append(ret) + + for future in futures: + try: + res = future.result() + if res.error is not None: + assert len(res.error) == 2, ( + f"BuildResult errors should be a 2-tuple, but it is a {len(res.error)}" + "-tuple. This should not happen!" 
+ ) + tb, exception = res.error + # instantiation error + if isinstance(exception, InstantiationError): + res = MeasureResult( + (tb, exception), + MeasureErrorNo.INSTANTIATION_ERROR, + res.time_cost, + time.time(), + ) + + else: + if "InstantiationError" in str(exception): + msg = str(exception) + try: + msg = msg.split("\n")[-2].split(": ")[1] + except Exception: # pylint: disable=broad-except + pass + res = MeasureResult( + (tb, InstantiationError(msg)), + MeasureErrorNo.INSTANTIATION_ERROR, + res.time_cost, + time.time(), + ) + + else: # tvm error + res = MeasureResult( + (tb, res.error), + MeasureErrorNo.COMPILE_HOST, + res.time_cost, + time.time(), + ) + except TimeoutError as ex: + tb = traceback.format_exc() + res = MeasureResult( + (tb, ex), MeasureErrorNo.BUILD_TIMEOUT, self.timeout, time.time() + ) + except ChildProcessError as ex: + tb = traceback.format_exc() + res = MeasureResult( + (tb, ex), MeasureErrorNo.RUNTIME_DEVICE, self.timeout, time.time() + ) + + results.append(res) + + return results + + +class RPCRunner(Runner): + """Run generated code on remove devices. + This function will ask a RPC Tracker to get device for measurement. + + Parameters + ---------- + timeout: float + The timeout of a RPCRunner measurement task + n_parallel: int + The number of tasks run in parallel. "None" will use all cpu cores + key: str + The key of the device registered in the tracker + host: str + The host address of RPC Tracker + port: int + The port of RPC Tracker + number: int + The number of times to run the generated code for taking average. + We call these runs as one `repeat` of measurement. + repeat : int, optional + The number of times to repeat the measurement. + In total, the generated code will be run (1 + number x repeat) times, + where the first "1" is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. 
+ min_repeat_ms: int, optional + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + cooldown_interval: float, optional + The cool down interval between two measurements. + enable_cpu_cache_flush: bool + Whether to flush cache on CPU between repeated measurements. + Flushing cache can make the measured latency of one operator closer to + its actual latency during end-to-end inference. + To make this option effective, the argument `number` should also be set to 1. + This is only has effect on CPU task. + module_loader : ModuleLoader + If given, a context manager that loads the module to be timed into the remote runtime. + If not given, default_module_loader is used. + """ + + def __init__( + self, + key, + host, + port, + priority=1, + timeout=10, + n_parallel=None, + number=4, + repeat=3, + min_repeat_ms=0, + cooldown_interval=0.1, + enable_cpu_cache_flush=False, + module_loader=None, + ): + super(RPCRunner, self).__init__(timeout, n_parallel) + + self.key = key + self.host = host + self.port = port + self.priority = priority + self.timeout = timeout + + self.number = number + self.repeat = repeat + self.min_repeat_ms = min_repeat_ms + self._ref_input = None + + self.enable_cpu_cache_flush = enable_cpu_cache_flush + self.cooldown_interval = cooldown_interval + self.module_loader = module_loader + + self.executor = PopenPoolExecutor( + timeout=timeout * (self.n_parallel + 1), + initializer=reset_global_scope, + initargs=(AutotvmGlobalScope.current,), + ) + + @property + def ref_input(self): + """ + Fixed input for tuning special operators, e.g., sparse operators + requiring indices as input. 
+ """ + return self._ref_input + + @ref_input.setter + def ref_input(self, val): + if val is not None: + warnings.warn( + "You are specifying fixed input for tuning the operator. " + "Be sure your input always fits the operator. Some " + "operators may conduct layout transformation during tuning, " + "thus can lead to unexpected behaviors. ", + RuntimeWarning, + ) + self._ref_input = val + + def set_task(self, task): + self.task = task + + if check_remote(task.target, self.key, self.host, self.port): + logger.info("Get devices for measurement successfully!") + else: + raise RuntimeError( + "Cannot get remote devices from the tracker. " + "Please check the status of tracker by " + "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' " + "and make sure you have free devices on the queue status." + ) + + def get_build_kwargs(self): + kwargs = {"checks": {}} + if ( + "cuda" in self.task.target.keys + or "opencl" in self.task.target.keys + or "rocm" in self.task.target.keys + or "vulkan" in self.task.target.keys + ): + remote = request_remote(self.key, self.host, self.port) + dev = remote.device(str(self.task.target), 0) + max_dims = dev.max_thread_dimensions + kwargs["checks"]["gpu"] = { + "max_shared_memory_per_block": dev.max_shared_memory_per_block, + "max_threads_per_block": dev.max_threads_per_block, + "max_thread_x": max_dims[0], + "max_thread_y": max_dims[1], + "max_thread_z": max_dims[2], + } + if "hexagon" in self.task.target.keys: + kwargs["checks"]["hexagon"] = {"vtcm_capacity": self.task.target.vtcm_capacity} + + return kwargs + + def run(self, measure_inputs, build_results): + results = [] + remote_kwargs = dict( + device_key=self.key, + host=self.host, + port=self.port, + priority=self.priority, + timeout=self.timeout, + ) + + for i in range(0, len(measure_inputs), self.n_parallel): + futures = [] + for measure_inp, build_res in zip( + measure_inputs[i : i + self.n_parallel], build_results[i : i + self.n_parallel] + ): + module_loader = ( + 
self.module_loader + if self.module_loader is not None + else default_module_loader() + ) + ret = self.executor.submit( + run_through_rpc, + measure_inp, + build_res, + self.number, + self.repeat, + self.min_repeat_ms, + self.cooldown_interval, + remote_kwargs, + self.ref_input, + self.enable_cpu_cache_flush, + module_loader, + ) + futures.append(ret) + + for future in futures: + try: + res = future.result() + results.append(res) + except Exception as ex: # pylint: disable=broad-except + tb = traceback.format_exc() + results.append( + MeasureResult( + (tb, ex), MeasureErrorNo.RUN_TIMEOUT, self.timeout, time.time() + ) + ) + + return results + + +class LocalRunner(RPCRunner): + """Run generated code on local devices. + + Parameters + ---------- + timeout: float + The timeout of a compilation + number: int + The number of times to run the generated code for taking average. + We call these runs as one `repeat` of measurement. + repeat : int, optional + The number of times to repeat the measurement. + In total, the generated code will be run (1 + number x repeat) times, + where the first one is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. + min_repeat_ms: int, optional + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + cooldown_interval: float, optional + The cool down interval between two measurements. + enable_cpu_cache_flush: bool + Whether to flush cache on CPU between repeated measurements. + Flushing cache can make the measured latency of one operator closer to + its actual latency during end-to-end inference. 
+ To make this option effective, the argument `number` should also be set to 1. + This is only has effect on CPU task. + Note + ---- + This is a "fake" local mode. We start a silent rpc tracker and rpc server + for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure. + """ + + def __init__( + self, + timeout=10, + number=4, + repeat=3, + min_repeat_ms=0, + cooldown_interval=0.1, + enable_cpu_cache_flush=False, + module_loader=None, + ): + super(LocalRunner, self).__init__( + "", + None, + None, + 0, + timeout=timeout, + n_parallel=1, + number=number, + repeat=repeat, + min_repeat_ms=min_repeat_ms, + cooldown_interval=cooldown_interval, + enable_cpu_cache_flush=enable_cpu_cache_flush, + module_loader=module_loader, + ) + self.tracker = None + self.server = None + + def set_task(self, task): + # pylint: disable=import-outside-toplevel + from ...rpc.server import Server + from ...rpc.tracker import Tracker + + self.task = task + tracker = Tracker(port=9000, port_end=10000, silent=True) + device_key = f"$local$device${tracker.port}" + server = Server( + port=9000, + port_end=10000, + key=device_key, + silent=True, + tracker_addr=("127.0.0.1", tracker.port), + ) + self.key = device_key + self.host = "127.0.0.1" + self.port = tracker.port + + super(LocalRunner, self).set_task(task) + return server, tracker + + +def _build_func_common(measure_input, runtime=None, checks=None, build_option=None): + """Common part for building a configuration""" + target, task, config = measure_input + target, task.target_host = Target.canon_target_and_host(target, task.target_host) + checks = checks or {} + with target: + s, args = task.instantiate(config) + + # check invalidity of template and code hash consistency + if not config.valid(): + raise InstantiationError(config.errors) + + # if target is vta, we need to use vta build + if ( + hasattr(measure_input.target, "device_name") + and measure_input.target.device_name == "vta" + ): + # pylint: 
disable=import-outside-toplevel + import vta + + func = vta.build(s, args, target_host=task.target_host) + else: + current_pass_context: tvm.ir.transform.PassContext = ( + tvm.ir.transform.PassContext.current() + ) + current_config = dict(current_pass_context.config) + if build_option is not None: + current_config.update(build_option) + + if "tir.add_lower_pass" in current_config: + current_add_lower_pass = list(current_config["tir.add_lower_pass"]) + else: + current_add_lower_pass = [] + if checks.get("gpu"): + current_add_lower_pass.append((2, gpu_verify_pass(**checks.get("gpu")))) + if checks.get("hexagon"): + current_add_lower_pass.append((2, vtcm_verify_pass(**checks.get("hexagon")))) + current_config["tir.add_lower_pass"] = current_add_lower_pass + + with tvm.ir.transform.PassContext( + opt_level=current_pass_context.opt_level, + required_pass=current_pass_context.required_pass, + disabled_pass=current_pass_context.disabled_pass, + instruments=current_pass_context.instruments, + config=current_config, + ): + func = build(s, args, target=target, runtime=runtime) + return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args) + + +class _WrappedBuildFunc: + """ + Wrap build_func to a function that can be used in measure. + + Note: this is a class instead of a closure so that it can be pickled when + using multiprocessing. + + Parameters + ---------- + build_func : The compilation function + We expect fcompile to contain an attr "output_format". + runtime : Optional[Runtime] + The runtime to generate artifacts for + + Returns + ------- + wrapped_build_func : callable + The wrapped build function + """ + + def __init__(self, build_func, runtime=None): + if not hasattr(build_func, "output_format"): + raise AttributeError("Expect build_func to have the attribute output_format.") + self.build_func = build_func + self.runtime = runtime + + def __call__(self, measure_input, tmp_dir, **kwargs): + """ + Wrapped build func. 
+ + Parameters + ---------- + measure_input: MeasureInput + The input of measurement + + tmp_dir: str + The path of temporary directory to export generated library + """ + tic = time.time() + try: + filename = os.path.join( + tmp_dir, f"tmp_func_{getrandbits(64):0x}.{self.build_func.output_format}" + ) + # TODO(tvm-team) consider linline _build_func_common + func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs) + if self.build_func.output_format == ".model-library-format": + # Late import to preserve autoTVM with USE_MICRO OFF + try: + from tvm import micro # pylint: disable=import-outside-toplevel + except ImportError: + raise ImportError("Requires USE_MICRO") + micro.export_model_library_format(func, filename) + else: + func.export_library(filename, fcompile=self.build_func) + except Exception as e: # pylint: disable=broad-except + tb = traceback.format_exc() + return BuildResult(None, None, (tb, e), time.time() - tic) + return BuildResult(filename, arg_info, None, time.time() - tic) + + +ModuleLoader = typing.Callable[ + [dict, dict], typing.ContextManager[typing.Tuple[tvm.rpc.RPCSession, tvm.runtime.Module]] +] + + +def run_through_rpc( + measure_input, + build_result, + number, + repeat, + min_repeat_ms, + cooldown_interval, + remote_kwargs, + ref_input, + enable_cpu_cache_flush=False, + module_loader=None, +): + """Run a generated library through rpc + + Parameters + ---------- + measure_input: MeasureInput + The raw measure input + build_result: BuildResult + The result returned from Builder. This contains the path to the generated library. + number: int + The number of times to run the generated code for taking average. + We call these runs as one `repeat` of measurement. + repeat : int, optional + The number of times to repeat the measurement. + In total, the generated code will be run (1 + number x repeat) times, + where the first one is warm up and will be discarded. 
+ The returned result contains `repeat` costs, + each of which is an average of `number` costs. + min_repeat_ms: int, optional + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + cooldown_interval: float + The cool down interval between two measurements + remote_kwargs: dict + Passed to module_loader(). Ultimately, keyword args to request_remote(). + ref_input: List of np.ndarray + The reference input used for tuning. Empty for randomly filled input. + enable_cpu_cache_flush: bool + Whether to flush cache on CPU between repeated measurements. + Flushing cache can make the measured latency of one operator closer to + its actual latency during end-to-end inference. + To make this option effective, the argument `number` should also be set to 1. + This is only has effect on CPU task. + module_loader: ModuleLoader + A function that returns a ContextManager used to establish and teardown the remote session. + """ + if isinstance(build_result, MeasureResult): + return build_result + + tic = time.time() + errno = MeasureErrorNo.NO_ERROR + try: + # upload built module + with module_loader(remote_kwargs, build_result) as (remote, mod): + dev = remote.device(str(measure_input.target), 0) + + # Limitation: + # We can not get PackFunction directly in the remote mode as it is wrapped + # under the std::function. We could lift the restriction later once we fold + # the PackedFunc as an object. Currently, we pass function name to work + # around it. 
+ f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else "" + time_f = mod.time_evaluator( + mod.entry_name, + dev, + number=number, + repeat=repeat, + min_repeat_ms=min_repeat_ms, + f_preproc=f_prepare, + ) + + if ref_input: + args = [nd.array(x, device=dev) for x in ref_input] + else: + try: + random_fill = remote.get_function("tvm.contrib.random.random_fill") + except AttributeError: + raise AttributeError( + "Please make sure USE_RANDOM is ON in the config.cmake " + "on the remote devices" + ) + args = [nd.empty(x[0], x[1], dev) for x in build_result.arg_info] + if "scatter" not in measure_input.task.name: + # the index tensor of scatter op cannot be randomly initialized + for arg in args: + random_fill(arg) + dev.sync() + + costs = time_f(*args).results + + if len(costs) > 2: # remove largest and smallest value to reduce variance + costs = list(costs) + costs.sort() + costs = tuple(costs[1:-1]) + except TVMError as exc: + msg = str(exc) + if "Stack trace returned" in msg: + msg = msg[: msg.index("Stack trace returned")] + if "CUDA Source" in msg: + msg = msg[: msg.index("CUDA Source")] + costs = (traceback.format_exc(), RuntimeError(msg[:1024])) + errno = MeasureErrorNo.RUNTIME_DEVICE + tstamp = time.time() + time.sleep(cooldown_interval) + return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp) + + +class DefaultModuleLoader: + """See default_module_loader(). 
A pickleable emulation of the original function closure.""" + + def __init__(self, pre_load_function=None) -> None: + self.pre_load_function = pre_load_function + + @contextlib.contextmanager + def __call__(self, remote_kwargs, build_result): + remote = request_remote(**remote_kwargs) + if self.pre_load_function is not None: + self.pre_load_function(remote, build_result) + + remote.upload(build_result.filename) + try: + yield remote, remote.load_module(os.path.split(build_result.filename)[1]) + + finally: + # clean up remote files + remote.remove(build_result.filename) + remote.remove(os.path.splitext(build_result.filename)[0] + ".so") + remote.remove("") + + +def default_module_loader(pre_load_function=None): + """Returns a default function that can be passed as module_loader to run_through_rpc. + + Parameters + ---------- + pre_load_function : Optional[Function[tvm.rpc.Session, tvm.runtime.Module]] + Invoked after a session is established and before the default code-loading RPC calls are + issued. Allows performing pre-upload actions, e.g. resetting the remote runtime environment. + + Returns + ------- + DefaultModuleLoader : + A callable that can be passed as module_loader to run_through_rpc. + """ + + # This was a function with a closure before but that couldn't be pickled! + # We need pickle to work for using python's multiprocessing on some platforms. + return DefaultModuleLoader(pre_load_function) + + +def request_remote(device_key, host=None, port=None, priority=1, timeout=60): + """Request a remote session + + Parameters + ---------- + device_key: string + The device key of registered device in tracker + host: host, optional + The host address of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_HOST" + port: int, optional + The port of rpc tracker. 
+ If is none, will use environment variable "TVM_TRACKER_PORT" + priority: int, optional + The priority of this request, larger is more prior + timeout: float, optional + The timeout of this session (units: second) + + Returns + ------ + session: RPCSession + """ + # connect to the tracker + host = host or os.environ["TVM_TRACKER_HOST"] + port = port or int(os.environ["TVM_TRACKER_PORT"]) + + tracker = _rpc.connect_tracker(host, port) + remote = tracker.request(device_key, priority=priority, session_timeout=timeout) + return remote + + +def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10): + """ + Check the availability of a remote device + + Parameters + ---------- + target: Target + The wanted compilation target + device_key: string + device key of registered device in tracker + host: host, optional + The host address of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_HOST" + port: int, optional + The port address of rpc tracker. + If is none, will use environment variable "TVM_TRACKER_PORT" + priority: int, optional + The priority of this request, larger is more prior + timeout: float, optional + The timeout of this check (units: seconds). + + Returns + ------- + available: bool + True if can find available device + """ + + def _check(): + logger.debug("waiting for device...") + remote = request_remote(device_key, host, port, priority) + dev = remote.device(str(target)) + while not dev.exist: # wait until we get an available device + pass + logger.debug("device available") + + t = threading.Thread(target=_check) + t.start() + t.join(timeout) + + remote = request_remote(device_key, host, port, priority) + dev = remote.device(str(target)) + return dev.exist + + +def set_cuda_target_arch(arch): + """THIS API IS DEPRECATED. + + set target architecture of nvcc compiler + + Parameters + ---------- + arch: str or list + The argument of nvcc -arch. (e.g. 
"sm_51", "sm_62") + it can also be a count of gencode arguments pass to nvcc command line, + e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"] + """ + raise ValueError( + "The API 'autotvm.measure.set_cuda_target_arch' is deprecated." + "Try specifying it by adding '-arch=sm_xx' to your target, such as 'cuda -arch=sm_86'." + "See https://github.com/apache/tvm/pull/9544 for the upgrade guide." + ) + + +def gpu_verify_pass(**kwargs): + """Verify the validity of a gpu kernel. + This pass will check memory usage and number of threads per block. + """ + + def verify_pass(f, *_): + valid = tvm.tir.analysis.verify_gpu_code(f, kwargs) + if not valid: + raise InstantiationError("Skipped because of invalid gpu kernel") + return f + + return tvm.tir.transform.prim_func_pass(verify_pass, opt_level=0) + + +def vtcm_verify_pass(**kwargs): + """Verify the validity of a hexagon kernel. + This pass will check vtcm memory usage. + """ + + def verify_pass(f, *_): + sizes = tvm.tir.analysis.calculate_allocated_bytes(f) + vtcm_capacity = kwargs.get("vtcm_capacity", 0) + vtcm_allocated = sizes.get("global.vtcm", 0) + if 0 < vtcm_capacity < vtcm_allocated: + raise InstantiationError("Skipped because of invalid vtcm memory usage limit") + + return f + + return tvm.tir.transform.prim_func_pass(verify_pass, opt_level=0) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/record.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/record.py new file mode 100644 index 0000000000000000000000000000000000000000..cde78d1dbc312218e20bb419afcd27bdcad5786f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/record.py @@ -0,0 +1,378 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=superfluous-parens, redefined-outer-name, redefined-outer-name,pointless-string-statement +# pylint: disable=consider-using-enumerate,invalid-name +"""Tuning record and serialization format""" + +import argparse +import base64 +from io import TextIOBase +import logging +import pickle +import json +import time +from typing import Union +import os +import itertools +from collections import OrderedDict +import numpy as np + +from .. import build, lower +from ..target import Target +from ..contrib import popen_pool +from .. import __version__ +from . 
import task +from .task import ConfigEntity, ApplyHistoryBest +from .measure import MeasureInput, MeasureResult + +AUTOTVM_LOG_VERSION = 0.2 +_old_version_warning = True +logger = logging.getLogger("autotvm") + +try: # convert unicode to str for python2 + _unicode = unicode +except NameError: + _unicode = () + +try: + _long = long +except NameError: + _long = int + + +def measure_str_key(inp, include_config=True): + """get unique str key for MeasureInput + + Parameters + ---------- + inp: autotvm.measure.MeasureInput + input for the measure + include_config: bool, optional + whether includes config in the str key + + Returns + ------- + key: str + The str representation of key + """ + config_str = str(inp.config) if include_config else "" + return "".join( + [str(inp.target), inp.task.name, str(inp.task.args), str(inp.task.kwargs), config_str] + ) + + +def encode(inp, result, protocol="json"): + """encode (MeasureInput, MeasureResult) pair to a string + + Parameters + ---------- + inp: autotvm.measure.MeasureInput + result: autotvm.measure.MeasureResult + pair of input/result + protocol: str + log protocol, json or pickle + + Returns + ------- + row: str + a row in the logger file + """ + + if protocol == "json": + json_dict = { + "input": (str(inp.target), inp.task.name, inp.task.args, inp.task.kwargs), + "config": inp.config.to_json_dict(), + "result": ( + result.costs if result.error_no == 0 else (1e9,), + result.error_no, + result.all_cost, + result.timestamp, + ), + "version": AUTOTVM_LOG_VERSION, + "tvm_version": __version__, + } + return json.dumps(json_dict) + if protocol == "pickle": + row = ( + str(inp.target), + str( + base64.b64encode( + pickle.dumps([inp.task.name, inp.task.args, inp.task.kwargs]) + ).decode() + ), + str(base64.b64encode(pickle.dumps(inp.config)).decode()), + str(base64.b64encode(pickle.dumps(tuple(result))).decode()), + str(AUTOTVM_LOG_VERSION), + str(__version__), + ) + return "\t".join(row) + + raise RuntimeError("Invalid log 
protocol: " + protocol) + + +def decode(row, protocol="json"): + """Decode encoded record string to python object + + Parameters + ---------- + row : str + a row in the logger file + + protocol : str + log protocol, json or pickle + + Returns + ------- + ret : tuple(autotvm.measure.MeasureInput, autotvm.measure.MeasureResult), or None + The tuple of input and result, or None if input uses old version log format. + """ + # pylint: disable=unused-variable + global _old_version_warning + + if protocol == "json": + row = json.loads(row) + if "v" in row and row["v"] == 0.1: + if _old_version_warning: + logger.warning("AutoTVM log version 0.1 is no longer supported.") + _old_version_warning = False + return None + + tgt, task_name, task_args, task_kwargs = row["input"] + tgt = str(tgt) + if "-target" in tgt: + logger.warning('"-target" is deprecated, use "-mtriple" instead.') + tgt = tgt.replace("-target", "-mtriple") + tgt = Target(str(tgt)) + + def clean_json_to_python(x): + """1. Convert all list in x to tuple (hashable) + 2. 
Convert unicode to str for python2 + """ + if isinstance(x, list): + return tuple([clean_json_to_python(a) for a in x]) + if isinstance(x, _unicode): + return str(x) + if isinstance(x, (_long, int)): + return int(x) + return x + + tsk = task.Task(clean_json_to_python(task_name), clean_json_to_python(task_args)) + config = ConfigEntity.from_json_dict(row["config"]) + inp = MeasureInput(tgt, tsk, config) + result = MeasureResult(*[tuple(x) if isinstance(x, list) else x for x in row["result"]]) + config.cost = np.mean(result.costs) + + return inp, result + if protocol == "pickle": + items = row.split("\t") + if len(items) == 4: + if _old_version_warning: + logger.warning("AutoTVM log version 0.1 is no longer supported.") + _old_version_warning = False + return None + tgt = Target(items[0]) + task_tuple = pickle.loads(base64.b64decode(items[1].encode())) + config = pickle.loads(base64.b64decode(items[2].encode())) + result = MeasureResult(*pickle.loads(base64.b64decode(items[3].encode()))) + config.cost = np.mean(result.costs) + + tsk = task.Task(task_tuple[0], task_tuple[1]) + return MeasureInput(tgt, tsk, config), result + + raise RuntimeError("Invalid log protocol: " + protocol) + + +def load_from_buffer(file: TextIOBase): + """Generator: load records from buffer. + This is a generator that yields the records. + + Parameters + ---------- + file: io.TextIOBase + + Yields + ------ + input: autotvm.measure.MeasureInput + result: autotvm.measure.MeasureResult + """ + for row in file: + if row and not row.startswith("#"): + ret = decode(row) + if ret is None: + continue + yield ret + + +def load_from_file(filepath: Union[str, bytes, os.PathLike]): + """Generator: load records from path. + This is a generator that yields the records. 
+ + Parameters + ---------- + filepath: str, bytes, or os.PathLike + + Yields + ------ + input: autotvm.measure.MeasureInput + result: autotvm.measure.MeasureResult + """ + with open(filepath) as f: + for row in f: + if row and not row.startswith("#"): + ret = decode(row) + if ret is None: + continue + yield ret + + +def split_workload(in_file, clean=True): + """Split a log file into separate files, each of which contains only a single workload + This function can also delete duplicated records in log file + + Parameters + ---------- + in_file: str + input filename + clean: bool + whether delete duplicated items + """ + tic = time.time() + lines = list(open(in_file).readlines()) + + logger.info("start converting...") + pool = popen_pool.PopenPoolExecutor() + lines = [rec for rec in pool.map(decode, lines) if rec is not None] + logger.info("map done %.2f", time.time() - tic) + + wkl_dict = OrderedDict() + for inp, res in lines: + wkl = measure_str_key(inp, False) + if wkl not in wkl_dict: + wkl_dict[wkl] = [] + wkl_dict[wkl].append([inp, res]) + + if clean: + for i, (k, v) in enumerate(wkl_dict.items()): + # clean duplicated items + added = set() + cleaned = [] + for inp, res in v: + str_key = measure_str_key(inp) + if str_key in added: + continue + added.add(str_key) + cleaned.append([inp, res]) + + # write to file + logger.info("Key: %s\tValid: %d\tDup: %d\t", k, len(cleaned), len(v) - len(cleaned)) + with open(args.i + f".{i:03d}.wkl", "w") as fout: + for inp, res in cleaned: + fout.write(encode(inp, res) + "\n") + else: + for i, (k, v) in enumerate(wkl_dict.items()): + logger.info("Key: %s\tNum: %d", k, len(v)) + with open(args.i + f".{i:03d}.wkl", "w") as fout: + for inp, res in v: + fout.write(encode(inp, res) + "\n") + + +def pick_best(in_file, out_file): + """ + Pick the best entries from a file and store them to another file. + This function distills the useful log entries from a large log file. 
+ If out_file already exists, the best entries from both + in_file and out_file will be saved. + + Parameters + ---------- + in_file: str + The filename of input + out_file: str or file + The filename of output + """ + context = load_from_file(in_file) + if os.path.isfile(out_file): + out_context = load_from_file(out_file) + context = itertools.chain(context, out_context) + context, context_clone = itertools.tee(context) + best_context = ApplyHistoryBest(context) + best_set = set() + + for v in best_context.best_by_model.values(): + best_set.add(measure_str_key(v[0])) + + for v in best_context.best_by_targetkey.values(): + best_set.add(measure_str_key(v[0])) + + logger.info("Extract %d best records from the %s", len(best_set), in_file) + fout = open(out_file, "w") if isinstance(out_file, str) else out_file + + for inp, res in context_clone: + if measure_str_key(inp) in best_set: + fout.write(encode(inp, res) + "\n") + best_set.remove(measure_str_key(inp)) + + +""" +Usage: +This record executable module has three modes. + +* Print log file in readable format +e.g. python -m tvm.autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code + +* Extract history best from a large log file +e.g. python -m tvm.autotvm.record --mode pick --i collect.log + +* Split a log file into separate files, each of which contains only a single wkl +e.g. 
python -m tvm.autotvm.record --mode split --i collect.log +""" +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--mode", choices=["read", "pick", "split"], default="read") + parser.add_argument("--i", type=str, help="input file") + parser.add_argument("--o", type=str, default=None, help="output file") + parser.add_argument("--begin", type=int, default=0) + parser.add_argument("--end", type=int, default=5) + parser.add_argument("--ir", action="store_true") + parser.add_argument("--code", action="store_true") + + args = parser.parse_args() + logging.basicConfig(level=logging.INFO) + + if args.mode == "pick": + args.o = args.o or args.i + ".best.log" + pick_best(args.i, args.o) + elif args.mode == "read": + for i, (inp, result) in enumerate(load_from_file(args.i)): + if args.begin <= i < args.end: + with inp.target: + s, arg_bufs = inp.task.instantiate(inp.config) + + print("") + print(inp.target, inp.task, inp.config) + print(result) + + if args.ir: + with inp.target: + print(lower(s, arg_bufs, simple_mode=True)) + + if args.code: + with inp.target: + func = build(s, arg_bufs) + print(func.imported_modules[0].get_source()) + elif args.mode == "split": + split_workload(args.i) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3949d324c4df577551d5365ab9d933c05697a55e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__init__.py @@ -0,0 +1,52 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Task is a tunable composition of template functions. + +Tuner takes a tunable task and optimizes the joint configuration +space of all the template functions in the task. +This module defines the task data structure, as well as a collection(zoo) +of typical tasks of interest. +""" + +from .task import ( + Task, + create, + get_config, + args_to_workload, + template, + serialize_args, + deserialize_args, +) +from .space import ConfigSpace, ConfigEntity +from .code_hash import attach_code_hash, attach_code_hash_to_arg +from .dispatcher import ( + DispatchContext, + ApplyConfig, + ApplyFixedConfig, + ApplyHistoryBest, + FallbackContext, + clear_fallback_cache, + ApplyGraphBest, +) + +from .topi_integration import ( + register_topi_compute, + register_topi_schedule, + TaskExtractEnv, + get_workload, +) +from .relay_integration import extract_from_program, extract_from_multiple_program diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dea52930327f845d7bc73a8f10435a7580a679ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2713e92253ebf361fa747bb053ea1a9de141bead Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/code_hash.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/code_hash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..963ec85b5fd7e924343215c2b5257e1cc0111b9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/code_hash.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/code_hash.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/code_hash.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f718ae99751432037dc123b8bde922ad254e3a9a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/code_hash.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/dispatcher.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c84b1281974773adeb6abe62fdf2536427a82c9e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/dispatcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/dispatcher.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/dispatcher.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6589173ec7213afb255eae9a56b02532149130d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/dispatcher.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/relay_integration.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/relay_integration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39f83be8e13e281084a87ce35d71138e678be7d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/relay_integration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/relay_integration.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/relay_integration.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3e08f4a99d7883c004baeba76762125d19fb0b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/relay_integration.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/space.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/space.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9c6dbd2bcba5a1d11488a49d0445cd147cb70bbd Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/space.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/space.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/space.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04c18c14e0b4d00d4f0ccc1bf24ed5ab069dd095 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/space.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/task.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/task.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77092cb695fe8889ae21945d0fa7b33f287322aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/task.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/task.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/task.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c4073489a7f4a31bf9bf405d3b7130e6a4ca35a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/task.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/topi_integration.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/topi_integration.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..b2144f3cb39b20d0fcb96f01ee47a0a32e998278 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/topi_integration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/topi_integration.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/topi_integration.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee6254b87c212fd585dcaf7afe52852ceaf14ea2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/__pycache__/topi_integration.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/code_hash.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/code_hash.py new file mode 100644 index 0000000000000000000000000000000000000000..2bd053da7244d32dcd0da9d8eea5bf663b8f089b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/code_hash.py @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Decorator functions for hashing schedule code + +code hashing is used to check the consistence of schedule code and the parameters loaded from log +""" +import functools +import inspect +import zlib + +from tvm.te import schedule + + +def attach_code_hash(s): + """Decorator for attaching a code hash to a schedule + + Parameters + ---------- + s: Schedule + tvm.te.schedule.Schedule to attach the hash to + """ + + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + func(*args, **kwargs) + raw_hash = zlib.crc32("".join(inspect.getsourcelines(func)[0]).encode()) + s.code_hash = hex(raw_hash)[2:] + + return wrapper + + return decorator + + +def attach_code_hash_to_arg(arg_idx=1): + """Decorator for attaching a code hash to a schedule + + Parameters + ---------- + arg_idx: int + index of the argument (expected to be a Schedule) to attach the code + hash to + """ + + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + func(*args, **kwargs) + assert isinstance(args[arg_idx], schedule.Schedule) + raw_hash = zlib.crc32("".join(inspect.getsourcelines(func)[0]).encode()) + args[arg_idx].code_hash = hex(raw_hash)[2:] + + return wrapper + + return decorator diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/dispatcher.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d5c290f4544001bb7478dee859660d351e3204 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/dispatcher.py @@ -0,0 +1,524 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Template dispatcher module. + +A dispatcher is a function that can contains multiple behaviors. +Its specific behavior is can be controlled by DispatchContext. + +DispatchContext is used in two ways, usually via different implementation +of the DispatchContext base class. + +- During search, we can use it to pass the current proposal from tuner. +- During evaluation, we can use it to set pick the best policy. +""" +# pylint: disable=invalid-name + +from __future__ import absolute_import as _abs + +from io import TextIOBase +import logging +from os import PathLike +from pathlib import Path +from typing import List, Iterable, Tuple, Union + +import numpy as np + +from .space import FallbackConfigEntity +from .. import env as _env +from ..measure import MeasureInput, MeasureResult + +logger = logging.getLogger("autotvm") + +Records = Union[ + Union[str, bytes, Path], # Path-like objects + TextIOBase, # File-like objects + Iterable[Tuple[MeasureInput, MeasureResult]], +] + + +class DispatchContext(object): + """ + Base class of dispatch context. + + DispatchContext enables the target and workload + specific dispatch mechanism for templates. 
+ """ + + current = None + # a set to prevent print duplicated message + warning_messages = set() + + def __init__(self): + self._old_ctx = DispatchContext.current + + def query(self, target, workload): + """ + Query the context to get the specific config for a template. + If cannot find the result inside this context, this function will query it + from the upper contexts. + + Parameters + ---------- + target: Target + The current target + workload : Workload + The current workload. + + Returns + ------- + cfg : ConfigSpace + The specific configuration. + """ + ret = self._query_inside(target, workload) + if ret is None: + ret = self._old_ctx.query(target, workload) + return ret + + def update(self, target, workload, cfg): + """ + Update context with a specific config. + + Parameters + ---------- + target: Target + The current target + workload : Workload + The current workload. + cfg : ConfigSpace + The specific configuration. + + Note + ---- + This interface is for cases when TVM decides to replace an operator in the graph. + For example, `AlterOpLayout` pass (enables when `opt_level = 3`) replaces `NCHW` + convolution with `NCHW[x]c` implementation on x86 CPUs. + Thus in TOPI, we first query schedule using original `NCHW` workload, + then update the dispatcher with the new `NCHW[x]c` workload. + So that later on, `NCHW[x]c` convolution can get schedule from the dispatcher using + its own workload directly. + + .. code-block:: python + + @conv2d_alter_layout.register("cpu") + def _alter_conv2d_layout(attrs, inputs, tinfo): + workload = get_conv2d_workload(...) + dispatch_ctx = autotvm.task.DispatchContext.current + target = tvm.target.Target.current() + config = dispatch_ctx.query(target, workload) + + # Get conv2d_NCHWc workload from config + # new_workload = ... + # new_inputs = ... + # new_attrs = ... 
+ + # Store altered operator's config + dispatch_ctx.update(target, new_workload, config) + return sym.contrib.conv2d_NCHWc(*new_inputs, **new_attrs) + + We directly store `config` back because `conv2d_NCHW` and `conv2d_NCHWc` + share the same schedule parameters. + One can construct a new `ConfigEntity` if this is not the case. + """ + raise NotImplementedError() + + def _query_inside(self, target, workload): + """ + Query the context to get the specific config for a template. + This function only query config inside this context. + + Parameters + ---------- + target: Target + The current target + workload : Workload + The current workload. + + Returns + ------- + cfg : ConfigSpace + The specific configuration. + """ + raise NotImplementedError() + + def __enter__(self): + self._old_ctx = DispatchContext.current + DispatchContext.current = self + return self + + def __exit__(self, ptype, value, trace): + DispatchContext.current = self._old_ctx + + +class ApplyConfig(DispatchContext): + """Apply a deterministic config entity for all queries. + + Parameters + ---------- + config : ConfigSpace or ConfigEntity + The specific configuration we care about. + """ + + def __init__(self, config): + super(ApplyConfig, self).__init__() + self._config = config + self.workload = None + + def _query_inside(self, target, workload): + """Override query""" + self.workload = workload + return self._config + + def update(self, target, workload, cfg): + """Override update""" + self.workload = workload + self._config = cfg + + +class ApplyFixedConfig(DispatchContext): + """Apply a config of a deterministic schedule. + This is used for building a single Relay operator with deterministic schedule + for testing schedules at Relay level. + + Parameters + ---------- + tasks : list[tvm.autotvm.task.task.Task] + List of autoTVM tasks. + schedule_names : str, List[str] + Name of schedules to use. 
+ """ + + def __init__(self, tasks, schedule_names: Union[str, List[str]]): + super(ApplyFixedConfig, self).__init__() + if isinstance(schedule_names, str): + self._schedule_names = list(schedule_names) + elif isinstance(schedule_names, list): + self._schedule_names = schedule_names + else: + raise RuntimeError("Incorrect type: " + schedule_names) + self._tasks = tasks + self.workload = None + + def _query_inside(self, target, workload): + """Override query""" + self.workload = workload + + # Create a config from correct task + for task in self._tasks: + if task.name == workload[0]: + config = task.config_space.get(0) + break + + if not config: + raise RuntimeError(f"workload: {str(workload)} does not exist in {str(self._tasks)}") + # Add low cost to the target schedule and high cost to others. + if workload[0] in self._schedule_names: + config.cost = 1e-6 + else: + config.cost = 100000 + return config + + def update(self, target, workload, cfg): + """Override update""" + self.workload = workload + self._config = cfg + + +class ApplyHistoryBest(DispatchContext): + """ + Apply the history best config + + Parameters + ---------- + records : None, Records, or iterator of Records objects, where a + Records object is a path-like object, a file-like object, + or an iterator of (MeasureInput, MeasureResult). + + Collection of tuning records. If multiple Records objects are passed, their + contents will be merged. + """ + + def __init__(self, records: Union[None, Records, Iterable[Records]]): + super(ApplyHistoryBest, self).__init__() + + self.best_by_targetkey = {} + self.best_by_model = {} + self._best_user_defined = {} + + if records: + self.load(records) + + def load(self, records: Union[Records, Iterable[Records]]): + """Load records to this dispatch context + + Parameters + ---------- + records : str, list of str, or iterator of (autotvm.measure.MeasureInput,\ + autotvm.measure.MeasureResult) + + Collection of tuning records. 
If multiple Records objects are passed, their + contents will be merged. + """ + # pylint: disable=import-outside-toplevel + from ..record import load_from_file, load_from_buffer + + def _unpack_records( + records: Union[Records, Iterable[Records]] + ) -> List[Tuple[MeasureInput, MeasureResult]]: + + if isinstance(records, (str, bytes, PathLike)): + return load_from_file(records) + + if isinstance(records, TextIOBase): + return load_from_buffer(records) + + joint_records = [] + for record in records: + if isinstance(record, Tuple) and isinstance(record[0], MeasureInput): + joint_records.append(record) + else: + joint_records += _unpack_records(record) + + return joint_records + + flattened_records = _unpack_records(records) + if not flattened_records: + return + + best_by_targetkey = self.best_by_targetkey + best_by_model = self.best_by_model + + counter = 0 + for inp, res in flattened_records: + counter += 1 + if res.error_no != 0: + continue + + # use target keys in tvm target system as key to build best map + for k in inp.target.keys: + key = (k, inp.task.workload) + if key not in best_by_targetkey: + best_by_targetkey[key] = (inp, res) + else: + _, other_res = best_by_targetkey[key] + if np.mean(other_res.costs) > np.mean(res.costs): + best_by_targetkey[key] = (inp, res) + + # use model as key to build best map + key = (inp.target.model, inp.task.workload) + if key not in best_by_model: + if inp.target.model != "unknown": + best_by_model[key] = (inp, res) + else: + _, other_res = best_by_model[key] + if np.mean(other_res.costs) > np.mean(res.costs): + best_by_model[key] = (inp, res) + + logger.debug("Finish loading %d records", counter) + + def _query_inside(self, target, workload): + if target is None: + raise RuntimeError( + "Need a target context to find the history best. " + "Hint: If your target is llvm, use `with tvm.target.Target('llvm'):`" + " above the dispatcher call. So does other target. 
" + ) + + # first try matching by model + key = (target.model, workload) + if key in self._best_user_defined: + return self._best_user_defined[key] + if key in self.best_by_model: + inp, _ = self.best_by_model[key] + return inp.config + + # then try matching by target key + for k in target.keys: + key = (k, workload) + if key in self._best_user_defined: + return self._best_user_defined[key] + if key in self.best_by_targetkey: + inp, _ = self.best_by_targetkey[key] + return inp.config + + return None + + def update(self, target, workload, cfg): + model = target.model + key = (model, workload) + # assume user provided config is the best + cfg.cost = 0 + self._best_user_defined[key] = cfg + + for k in target.keys: + key = (k, workload) + self._best_user_defined[key] = cfg + + +class FallbackContext(DispatchContext): + """ + A fallback dispatch context. + + Any tunable template can be called under this context. + This is the root context. + """ + + def __init__(self): + super(FallbackContext, self).__init__() + self.memory = {} + + def _query_inside(self, target, workload): + key = (str(target), workload) + if key in self.memory: + return self.memory[key] + + if not _env.GLOBAL_SCOPE.silent: + msg = ( + f"Cannot find config for target={target}, workload={workload}. A fallback " + f"configuration is used, which may bring great performance regression." + ) + if msg not in DispatchContext.warning_messages: + DispatchContext.warning_messages.add(msg) + logger.warning(msg) + cfg = FallbackConfigEntity() + + # cache this config + self.memory[key] = cfg + return cfg + + def clear_cache(self, target, workload): + """Clear fallback cache. Pass the same argument as _query_inside to this function + to clean the cache. + + Parameters + ---------- + target: Target + The current target + workload : Workload + The current workload. 
+ """ + key = (str(target), workload) + if key in self.memory: + del self.memory[key] + + def update(self, target, workload, cfg): + key = (str(target), workload) + self.memory[key] = cfg + + +DispatchContext.current = FallbackContext() + + +def clear_fallback_cache(target, workload): + """Clear fallback cache. Pass the same argument as _query_inside to this function + to clean the cache. + + Parameters + ---------- + target: Target + The current target + workload : Workload + The current workload. + + Note + ---- + This is used in alter_op_layout to clear the bad cache created before call topi compute function + """ + context = DispatchContext.current + while not isinstance(context, FallbackContext): + context = context._old_ctx + context.clear_cache(target, workload) + + +class ApplyGraphBest(DispatchContext): + """Load the graph level tuning optimal schedules. + + The input records should be in the ascending order of + node index for target operator. Usually this can be obtained + with graph tuner. + + This context maintains an internal counter to indicate the current + node index. + """ + + def __init__(self, records: Records): + """ + Parameters + ---------- + records : str or iterator of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult) + Collection of tuning records. + If is str, then it should be the filename of a records log file. + Each row of this file is an encoded record pair. + Otherwise, it is an iterator. + """ + # pylint: disable=import-outside-toplevel + from ..record import load_from_file, load_from_buffer + + super(ApplyGraphBest, self).__init__() + if isinstance(records, (str, bytes, PathLike)): + records = load_from_file(records) + elif isinstance(records, TextIOBase): + records = load_from_buffer(records) + else: + records = list(records) + + self._records = list(records) + self._counter = 0 + self._global_cfg_dict = {} + + def _query_inside(self, target, workload): + """ + Query the context to get config from records. 
+ + Parameters + ---------- + target : Target + The current target + workload : Workload + The current workload. + + Returns + ------- + cfg : ConfigSpace + The specific configuration. + """ + if self._counter < len(self._records): + cfg = self._records[self._counter][0].config + wkl = self._records[self._counter][0].task.workload + if workload is not None: + assert wkl == workload + self._counter += 1 + self.update(target, wkl, cfg) + cfg.workload = wkl + return cfg + key = (str(target), workload) + if key not in self._global_cfg_dict: + msg = ( + f"Config for target={target}, workload={workload} is missing in ApplyGraphBest " + f"context. A fallback configuration is used, which may bring great performance " + f"regression." + ) + logger.warning(msg) + cfg = FallbackConfigEntity() + self._global_cfg_dict[key] = cfg + else: + cfg = self._global_cfg_dict[key] + return cfg + + def update(self, target, workload, cfg): + key = (str(target), workload) + self._global_cfg_dict[key] = cfg diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/relay_integration.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/relay_integration.py new file mode 100644 index 0000000000000000000000000000000000000000..11f40ed62756b171bdaabe8fad748d96639cf907 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/relay_integration.py @@ -0,0 +1,155 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-variable,invalid-name, not-context-manager +""" +Decorator and utilities for the integration with TOPI and Relay +99.9% copy-paste of implementation by @MerryMercy + +""" +import threading +import logging + +import tvm +from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext +from tvm.target import Target +from .task import create +from .topi_integration import TaskExtractEnv + +logger = logging.getLogger("autotvm") + + +# TODO(moreau89) find a more elegant way to lower for VTAs +def _lower(mod, target, params, opt_level=3): + """Helper to lower VTA properly.""" + # pylint: disable=import-outside-toplevel + from tvm import relay + from tvm.relay.backend import graph_executor_codegen + + if hasattr(target, "device_name") and target.device_name == "vta": + import vta + + with vta.build_config(opt_level=opt_level, disabled_pass={"AlterOpLayout"}): + mod, _ = relay.optimize(mod, target=target, params=params) + grc = graph_executor_codegen.GraphExecutorCodegen(None, target) + grc.codegen(mod, mod["main"]) + return + + # Alter op layout code has been written expecting that tuning is applied + # without it, so we disable AlterOpLayout to maintain that behavior. + with tvm.transform.PassContext(opt_level=opt_level, disabled_pass={"AlterOpLayout"}): + compiler = relay.vm.VMCompiler() + if params: + compiler.set_params(params) + compiler.lower(mod, target=target) + + +def extract_from_program(mod, params, target, target_host=None, ops=None): + """Extract tuning tasks from a relay program. 
+ + This function is the single program version of extract_from_multiple_program. + + Parameters + ---------- + mod: tvm.IRModule or relay.function.Function + The module or function to tune + params: dict of str to numpy array + The associated parameters of the program + target: tvm.target.Target + The compilation target + target_host: tvm.target.Target + The host compilation target + ops: List[tvm.ir.Op] or None + List of relay ops to be tuned. If not specified, all tunable ops will be extracted. + + Returns + ------- + task: Array of autotvm.task.Task + collected tasks + """ + target, target_host = Target.canon_target_and_host(target, target_host) + return extract_from_multiple_program([mod], [params], target, ops=ops) + + +def extract_from_multiple_program(mods, params, target, target_host=None, ops=None): + """Extract tuning tasks from multiple relay programs. + + This function collects tuning tasks by building a list of programs + with a "tracing" target and tracing all the calls to topi. + + Parameters + ---------- + mods: List[tvm.IRModule] or List[relay.function.Function] + The list of modules or functions to tune + params: List of dict of str to numpy array + The associated parameters of the programs + target: tvm.target.Target + The compilation target + target_host: tvm.target.Target + The host compilation target + ops: List[tvm.ir.Op] or None + List of relay ops to be tuned. If not specified, all tunable ops will be extracted. 
+ + Returns + ------- + task: Array of autotvm.task.Task + collected tasks + """ + # pylint: disable=import-outside-toplevel + from tvm import relay + from tvm import topi + + env = TaskExtractEnv.get() + + # merge target and target host + target, target_host = Target.canon_target_and_host(target, target_host) + + # run compiler to collect all TOPI calls during compilation + env.reset(ops) + with env: + # disable logger temporarily + old_state = logger.disabled + logger.disabled = True + + for mod, param in zip(mods, params): + if isinstance(mod, relay.function.Function): + mod = tvm.IRModule.from_expr(mod) + assert isinstance( + mod, tvm.IRModule + ), "only support relay Module or Function to be tuned" + relay.backend.te_compiler.get().clear() + # wrap build call in thread to avoid multiprocessing problems + build_thread = threading.Thread(target=_lower, args=(mod, target, param)) + build_thread.start() + build_thread.join() + relay.backend.te_compiler.get().clear() + # Clear the warning message cache in FallbackContext + if isinstance(DispatchContext.current, FallbackContext): + DispatchContext.current.memory = {} + DispatchContext.warning_messages = set() + + logger.disabled = old_state + + # create tasks for target + tasks = [] + for task_name, args in env.get_tasks(): + try: + tsk = create(task_name, args, target=target) + tasks.append(tsk) + except topi.InvalidShapeError: + logger.warning("Invalid shape during AutoTVM task creation") + + return tasks diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/space.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/space.py new file mode 100644 index 0000000000000000000000000000000000000000..e81bad694683bbdc624bbcf111f76306e5190f7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/space.py @@ -0,0 +1,1444 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor 
license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=too-few-public-methods,invalid-name,unused-argument,arguments-differ +# pylint: disable=consider-using-enumerate,too-many-lines, invalid-sequence-index +""" +Template configuration space. + +Each template function can be parameterized by a ConfigSpace. +The space is declared when we invoke the template function with ConfigSpace. +During evaluation, we pass in a ConfigEntity, which contains a specific +entity in the space. This entity contains deterministic parameters. +""" +from __future__ import absolute_import as _abs + +import itertools +import functools +import math +from collections import namedtuple, OrderedDict +from random import randrange +import numpy as np + +from tvm.te import schedule, thread_axis +from tvm.tir import expr +from tvm.autotvm.utils import get_const_int + +Axis = namedtuple("Axis", ["space", "index"]) + +try: + _long = long +except NameError: + _long = int + + +class InstantiationError(ValueError): + """Actively detected error in instantiating a template with a config, + raised by cfg.raise_error + e.g. too many unrolling, too many threads in a block + """ + + +class TransformSpace(object): + """Base class for transform space + TransformSpace is the node in the computation graph of axes + + .. 
note:: + + We can regard our schedule code as a transformation graph of axes. + Starting from raw axes in the definition of te.compute, we can transform these axes + by some operators. The operator includes 'split', 'reorder' and 'annotate'. + Each operator has some tunable parameters (e.g. the split factor). + Then the tuning process is just to find good parameters of these op. + + So all the combinations of the parameters of these op form our search space. + + Naming convention: + We call the set of all possible values as XXXSpace. (XXX can be Split, Reorder, Config ...) + We call a specific entity in a space as XXXEntity. + """ + + def __init__(self): + self.ins = [] + self.num_output = 0 + self.entities = [] + + def __len__(self): + return len(self.entities) + + def __getitem__(self, index): + """Get an entity of the space by index + + Parameters + ---------- + index: int + + Returns + ------- + transform entity + """ + return self.entities[index] + + @staticmethod + def get_num_output(): + """get number of output axes after this transform + + Returns + ------- + n: int + number of output axes + """ + return 0 + + +class VirtualAxis(TransformSpace): + """Axis placeholder in template + + Parameters + ---------- + var: int or tvm.te.schedule.IterVar + If is int, return a virtual axis whose length is the provided argument. + If is IterVar, return a virtual axis whose length is extracted from + the IterVar's extent domain. 
+ + name: str + """ + + name_ct = 0 + + def __init__(self, var, name=None): + super(VirtualAxis, self).__init__() + self.num_output = 1 + + if name is None: + name = f"axis_{VirtualAxis.name_ct}" + VirtualAxis.name_ct += 1 + + self.name = name + if isinstance(var, (int, _long)): + self.length = var + elif isinstance(var, schedule.IterVar): + self.name = var.var.name + if var.dom is None: + self.length = -1 + else: + self.length = get_const_int(var.dom.extent) + elif isinstance(var, VirtualAxis): + self.length = var.length + else: + raise RuntimeError("Invalid type of axis: " + str(type(var))) + + @staticmethod + def get_num_output(var, name=None): + return 1 + + def __repr__(self): + return f"vaxis({self.name})" + + +def get_factors(n): + """return all factors of an integer + + Parameters + ---------- + n: int + integer to factorize + + Returns + ------- + factors: list + List of all factors + """ + step = 2 if n % 2 else 1 + ret = list( + set( + functools.reduce( + list.__add__, + ([i, n // i] for i in range(1, int(math.sqrt(n)) + 1, step) if n % i == 0), + ) + ) + ) + ret.sort() + return ret + + +def get_pow2s(n): + """return all power-of-two numbers that are less or equal than the integer + + Parameters + ---------- + n: int + integer for reference + + Returns + ------- + factors: list + List of all power-of-two numbers + """ + return [2**x for x in range(math.floor(math.log2(n)) + 1)] + + +class SplitSpace(TransformSpace): + """Split an axis for several times""" + + def __init__(self, axes, policy, **kwargs): + super(SplitSpace, self).__init__() + axis = axes[0] + + self.policy = policy + self.entities = [] + + max_factor = kwargs.get("max_factor", 1 << 31) + fil = kwargs.get("filter", lambda x: True) + self.product = axis.length + self.num_output = kwargs.get("num_outputs", 0) + assert self.num_output > 0 + + if policy == "candidate": + for size in kwargs["candidate"]: + assert len(size) == self.num_output + self.entities.append(SplitEntity(size)) + else: + if 
policy == "verbose": + # Include factors and power-of-twos. May generate tails. + divisibles = get_factors(self.product) + pow2s = get_pow2s(self.product) + factors = [x for x in list(set(divisibles) | set(pow2s)) if x <= max_factor] + elif policy == "factors": + # Include divisible factors. Guarantee no tails. + factors = [x for x in get_factors(self.product) if x <= max_factor] + elif policy == "power2": + # Include less, equal, and round-up power-of-two numbers. May generate tails. + factors = [x for x in get_pow2s(self.product) if x <= max_factor] + else: + raise RuntimeError(f"Invalid policy: {policy}") + + # Enforce the product of all split factors equals to the axis length + no_tail = kwargs.get("no_tail", policy == "factors") + + # Generate split entity by enumerating candidate factors. + self.factors = factors + self._generate_space(0, [None] * (self.num_output - 1), enforce_no_tail=no_tail) + + self.entities = list(filter(fil, self.entities)) + + def _generate_space(self, now, tmp_stack, enforce_no_tail=False): + """Generate space by DFS""" + if now == self.num_output - 1: + prod = functools.reduce(lambda x, y: x * y, tmp_stack) + if prod > self.product: + return + if self.product % prod == 0 or (not enforce_no_tail and prod < self.product): + self.entities.append(SplitEntity([-1] + tmp_stack[::-1])) + else: + for factor in self.factors: + tmp_stack[now] = factor + self._generate_space(now + 1, tmp_stack, enforce_no_tail) + + @staticmethod + def get_num_output(axes, policy, **kwargs): + return kwargs["num_outputs"] + + def __repr__(self): + return "Split(policy=%s, product=%d, num_outputs=%d) len=%d" % ( + self.policy, + self.product, + self.num_output, + len(self), + ) + + +class SplitEntity(object): + """ + A split operation with detailed parameters + that can apply to an axis + + Parameters + ---------- + size: Array of int + the size of every axis after split. + e.g. 
an axis of extent 128, we split it into 3 axes, a possible + size is [4, 4, 8] (4x4x8 = 128). + """ + + def __init__(self, size): + self.size = size + + def apply(self, sch, op, axis): + """Apply split to an axis + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + The tvm schedule + op: tvm.te.Operation + The stage to be applied + axis: tvm.te.schedule.IterVar + axis to split + + Returns + ------- + axes : list of Axis + The transformed axes. + """ + ret = [] + for i in range(1, len(self.size)): + ax0, ax1 = sch[op].split(axis, int(np.prod(self.size[i:]))) + ret.append(ax0) + axis = ax1 + return ret + [axis] + + def __repr__(self): + return str(self.size) + + +class ReorderSpace(TransformSpace): + """The parameter space for ordering an array of axes""" + + def __init__(self, axes, policy, **kwargs): + super(ReorderSpace, self).__init__() + self.ins = axes + self.policy = policy + self.num_output = len(axes) + + if policy == "identity": + self.entities = [ReorderEntity(range(len(axes)))] + elif policy == "all": + self.entities = [ReorderEntity(x) for x in itertools.permutations(range(len(axes)))] + elif policy == "interval_all": + begin, end = kwargs["interval"] + sub_space = list(itertools.permutations(range(begin, end))) + prefix, suffix = tuple(range(begin)), tuple(range(end, len(axes))) + self.entities = [ReorderEntity(prefix + x + suffix) for x in sub_space] + elif policy == "candidate": + candidate = kwargs["candidate"] + for can in candidate: + perm = [axes.index(x) for x in can] + self.entities.append(ReorderEntity(perm)) + elif policy == "interleave": + spatial, reduce = kwargs["spatial"], kwargs["reduce"] + + spatial = [[axes.index(x) for x in ch] for ch in spatial] + reduce = [[axes.index(x) for x in ch] for ch in reduce] + + outer_merged = self._merge_chain([x[:-1] for x in spatial]) + inner_merged = self._merge_chain([x[-1:] for x in spatial] + reduce) + + for o in outer_merged: + for i in inner_merged: + self.entities.append(ReorderEntity(o + 
i)) + elif policy == "interleave_cuda": + spatial, reduce = kwargs["spatial"], kwargs["reduce"] + + spatial = [[axes.index(x) for x in ch] for ch in spatial] + reduce = [[axes.index(x) for x in ch] for ch in reduce] + + outer_merged = self._merge_chain([x[:-1] for x in spatial]) + reduce_merged = self._merge_chain(reduce) + inner_merged = [x[-1] for x in spatial] + + for o in outer_merged: + for r in reduce_merged: + self.entities.append(ReorderEntity(o + r + inner_merged)) + else: + raise RuntimeError("Invalid policy: " + policy) + + @staticmethod + def get_num_output(axes, policy, **kwargs): + return len(axes) + + def __repr__(self): + return f"Reorder(policy={self.policy}) len={len(self)}" + + def _merge_chain(self, chains): + """generate all combinations of merge some chains""" + merged = [] + tmp_pt = [0] * len(chains) + tmp_stack = [] + + size = np.sum([len(x) for x in chains]) + self._merge_dfs(chains, size, tmp_pt, tmp_stack, merged) + return merged + + def _merge_dfs(self, chains, size, tmp_pt, tmp_stack, merged): + if np.sum(tmp_pt) == size: + merged.append(list(tmp_stack)) + return + + for i in range(len(chains)): + # use i == np.argmax(....) here to take spatial order into consideration + # if we don't want to consider spatial order, we can use tmp_pt[i] == np.max(....) 
+ if tmp_pt[i] < len(chains[i]) and ( + i == np.argmax([len(chains[x]) - tmp_pt[x] for x in range(len(chains))]) + ): + tmp_stack.append(chains[i][tmp_pt[i]]) + tmp_pt[i] += 1 + self._merge_dfs(chains, size, tmp_pt, tmp_stack, merged) + tmp_pt[i] -= 1 + tmp_stack.pop() + + +class ReorderEntity(object): + """A reorder operation with detailed parameters that can apply to axes + + Parameters + ---------- + perm: Array of int + define the permutation + """ + + def __init__(self, perm): + self.perm = perm + + def apply(self, sch, op, axes): + """Apply reorder to an array of axes + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + The tvm schedule + op: tvm.te.Operation + The stage to be applied + axis: tvm.te.schedule.IterVar + axis to split + + Returns + ------- + axes : list of Axis + The transformed axes. + """ + if len(axes) == len(self.perm): + new_order = [axes[i] for i in self.perm] + else: + new_order = [axes[i] for i in self.perm if i < len(axes)] + sch[op].reorder(*new_order) + return new_order + + def __repr__(self): + return str(self.perm) + + +class AnnotateSpace(TransformSpace): + """The parameter space for annotating an array of axes""" + + def __init__(self, axes, policy, **kwargs): + super(AnnotateSpace, self).__init__() + + self.ins = axes + self.policy = policy + self.num_output = len(axes) + + if policy == "bind_gpu": + self.num_axis = len(axes) + if self.num_axis >= 6: + self.entities.append( + AnnotateEntity( + ["fuse"] * (self.num_axis - 6) + + [ + "blockIdx.z", + "blockIdx.y", + "blockIdx.x", + "threadIdx.z", + "threadIdx.y", + "threadIdx.x", + ] + ) + ) + elif self.num_axis >= 4: + self.entities.append( + AnnotateEntity( + ["fuse"] * (self.num_axis - 4) + + ["blockIdx.y", "blockIdx.x", "threadIdx.y", "threadIdx.x"] + ) + ) + elif self.num_axis >= 2: + self.entities.append( + AnnotateEntity(["fuse"] * (self.num_axis - 2) + ["blockIdx.x", "threadIdx.x"]) + ) + else: + raise RuntimeError("Unhandled case in bind_gpu") + elif policy == 
"bind_gpu_virtual": + self.num_axis = len(axes) + if self.num_axis >= 9: + self.entities.append( + AnnotateEntity( + ["fuse"] * (self.num_axis - 9) + + [ + "blockIdx.z", + "blockIdx.y", + "blockIdx.x", + "vthread", + "vthread", + "vthread", + "threadIdx.z", + "threadIdx.y", + "threadIdx.x", + ] + ) + ) + elif self.num_axis >= 6: + self.entities.append( + AnnotateEntity( + ["fuse"] * (self.num_axis - 6) + + [ + "blockIdx.y", + "blockIdx.x", + "vthread", + "vthread", + "threadIdx.y", + "threadIdx.x", + ] + ) + ) + elif self.num_axis >= 3: + self.entities.append( + AnnotateEntity( + ["fuse"] * (self.num_axis - 3) + ["blockIdx.x", "vthread", "threadIdx.x"] + ) + ) + else: + raise RuntimeError("Unhandled case in bind_gpu") + elif policy == "locate_cache": + self.num_axis = len(axes) + num_anchor = kwargs["num_anchor"] + self.anns = list(itertools.combinations(range(self.num_axis), num_anchor)) + self.entities = [AnnotateEntity(x) for x in self.anns] + else: # none, vec, unroll, try_vec, try_unroll, try_vec_unroll, ... 
+ anns = policy.replace("try", "none").split("_") + + for ann in anns: + if ann not in ["none", "unroll", "vec"]: + raise RuntimeError("Invalid policy: " + policy) + + self.num_axis = len(axes) + self.anns = [anns] * self.num_axis + self._generate_space(0, [""] * self.num_axis) + + def _generate_space(self, now, tmp_stack): + """Generate space by DFS""" + if now == self.num_axis: + # only vectorize inner most dimension + vec_ct = tmp_stack.count("vec") + if vec_ct in (0, 1): + self.entities.append(AnnotateEntity(list(tmp_stack))) + else: + for ann in self.anns[now]: + tmp_stack[now] = ann + self._generate_space(now + 1, tmp_stack) + + @staticmethod + def get_num_output(axes, policy, **kwargs): + return len(axes) + + def __repr__(self): + return f"Annotate(policy={self.policy}) len={len(self)}" + + +class AnnotateEntity(object): + """An annotation operation with detailed parameters that can apply to axes + + Parameters + ---------- + anns: Array of string + The annotations of axes + """ + + def __init__(self, anns): + self.anns = anns + + def apply( + self, sch, op, axes, axis_lens=None, max_unroll=None, vec_size=None, cfg=None, source=None + ): + """Apply annotation to an array of axes + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + The tvm schedule + op: tvm.te.Operation + The stage to be applied + axes: Array of tvm.te.schedule.IterVar + axis to split + axis_lens: Array of int, optional + the length of axes + max_unroll: int, optional + maximum unroll step + vec_size: Array of int, optional + valid vector lanes for vectorization + cfg: ConfigEntity, optional + cfg for recording error + source: Array of Array tensor, optional + source tensor for attaching cache + + Returns + ------- + axes : list of tvm.te.schedule.IterVar + The transformed axes + """ + if source is not None: # special case : attach cache_read/cache_write + for src, to in zip(source, self.anns): + for t in src: + sch[t].compute_at(sch[op], axes[to]) + else: # other cases + for i, ann 
in enumerate(self.anns): + if ann == "none": + pass + elif ann == "unroll": + if max_unroll and axis_lens[i] > max_unroll: + cfg.raise_error("Too large factor for unrolling") + sch[op].unroll(axes[i]) + elif ann == "vec": + if vec_size and axis_lens[i] not in vec_size: + cfg.raise_error("Wrong size of lanes in vectorization") + sch[op].vectorize(axes[i]) + elif ann == "blockIdx.x": + sch[op].bind(axes[i], thread_axis("blockIdx.x")) + elif ann == "blockIdx.y": + sch[op].bind(axes[i], thread_axis("blockIdx.y")) + elif ann == "blockIdx.z": + sch[op].bind(axes[i], thread_axis("blockIdx.z")) + elif ann == "threadIdx.x": + sch[op].bind(axes[i], thread_axis("threadIdx.x")) + elif ann == "threadIdx.y": + sch[op].bind(axes[i], thread_axis("threadIdx.y")) + elif ann == "threadIdx.z": + sch[op].bind(axes[i], thread_axis("threadIdx.z")) + elif ann == "vthread": + sch[op].bind(axes[i], thread_axis("vthread")) + elif ann == "fuse": + assert i < len(axes) - 1 + axes[i + 1] = sch[op].fuse(axes[i], axes[i + 1]) + else: + raise RuntimeError("Invalid annotation " + ann) + return axes + + def __repr__(self): + return str(self.anns) + + +class OtherOptionSpace(TransformSpace): + """The parameter space for general option""" + + def __init__(self, axes, policy, **kwargs): + super(OtherOptionSpace, self).__init__() + + candidate = kwargs["candidate"] + self.entities = [OtherOptionEntity(x) for x in candidate] + + @staticmethod + def get_num_output(axes, policy, **kwargs): + return 0 + + def __repr__(self): + return f"OtherOption({self.entities}) len={len(self)}" + + +class OtherOptionEntity(object): + """The parameter entity for general option, with a detailed value""" + + def __init__(self, val): + self.val = val + + def __repr__(self): + return str(self.val) + + +class ConfigSpace(object): + """The configuration space of a schedule. 
Pass it as config in template to + collect transformation space and build transform graph of axes + """ + + def __init__(self): + # private dict to provide sugar + self.space_map = OrderedDict() # name -> space + self._collect = True + self._length = None + self._range_length = None + self._dims = None + self._entity_map = OrderedDict() # name -> entity + self._constraints = [] + self.errors = [] + self.code_hash = None + self.flop = 0 + self.cost = None + self.is_fallback = False + self._shared_filter = None + self._shared_filter_cache = None + + @staticmethod + def axis(var): + """get a virtual axis (axis placeholder) + + Parameters + ---------- + var: int or tvm.te.schedule.IterVar + If is int, return an axis whose length is the provided argument. + If is IterVar, return an axis whose length is extracted from the + IterVar's extent domain. + """ + return VirtualAxis(var) + + reduce_axis = axis + + def define_split(self, name, axis, policy="factors", **kwargs): + """Define a new tunable knob which splits an axis into a list of axes + + Parameters + ---------- + name: str + name to index the entity of this space + axis: tvm.te.schedule.IterVar + axis to split + policy: str + name of policy. + If is 'factors', the tuner will try all divisible factors. + If is 'power2', the tuner will try power-of-two factors less or equal to the length. + If is 'verbose', the tuner will try all candidates in above two policies. + If is 'candidate', try given candidates. + **kwargs: + extra arguments for policy + + ``max_factor``: + the maximum split factor (`int`). + ``filter``: + see examples below for how to use filter (`Callable[[int], bool]`). + ``num_outputs``: + the total number of axis after split (`int`). + ``no_tail``: + should we only include divisible numbers as split factors (`bool`). + ``candidate``: + (policy=candidate) manual candidate list (`List`). 
+ + Examples + -------- + >>> # use custom candidates + >>> cfg.define_split('tile_x', x, policy='candidate', num_outputs=3, + >>> candidate=[[1, 4, 4], [4, 1, 4]]) + + >>> # use a filter that only accepts the split scheme whose inner most tile is less then 4 + >>> cfg.define_split('tile_y', y, policy='factors', num_outputs=3, + >>> filter=lambda x: x.size[-1] <= 4) + """ + axes = [axis] + return self._add_new_transform(SplitSpace, name, axes, policy, **kwargs) + + def define_reorder(self, name, axes, policy, **kwargs): + """Define a new tunable knob which reorders a list of axes + + Parameters + ---------- + name: str + name to index the entity of this space + axes: Array of tvm.te.schedule.IterVar + axes to reorder + policy: str + name of policy + If is 'identity', do an identity permutation. + If is 'all', try all permutations. + If is 'interval_all', try all permutations of an interval of axes. + If is 'candidate', try listed candidate. + If is 'interleave', interleave chains of spatial axes and chains of reduction axes. + kwargs: dict + extra arguments for policy + """ + return self._add_new_transform(ReorderSpace, name, axes, policy, **kwargs) + + def define_annotate(self, name, axes, policy, **kwargs): + """Define a new tunable knob which annotates a list of axes + + Parameters + ---------- + name: str + name to index the entity of this space + axes: Array of tvm.te.schedule.IterVar + axes to annotate + policy: str + name of policy + If is 'unroll', unroll the axes. + If is 'try_unroll', try to unroll the axes. + If is 'try_unroll_vec', try to unroll or vectorize the axes. + If is 'bind_gpu', bind the first few axes to gpu threads. + If is 'locate_cache', choose n axes to attach shared/local cache. 
+ kwargs: dict + extra arguments for policy + """ + return self._add_new_transform(AnnotateSpace, name, axes, policy, **kwargs) + + def define_knob(self, name, candidate): + """Define a tunable knob with a list of candidates + + Parameters + ---------- + name: str + name key of that option + candidate: list + list of candidates + """ + return self._add_new_transform(OtherOptionSpace, name, [], None, candidate=candidate) + + def add_flop(self, flop): + """Add float operation statistics for this tuning task + + Parameters + --------- + flop: int or float or IntImm or FloatImm + number of float operations + """ + if isinstance(flop, (expr.IntImm, expr.FloatImm)): + flop = flop.value + self.flop += float(flop) + + def raise_error(self, msg): + """register error in config + Using this to actively detect error when scheduling. + Otherwise these error will occur during runtime, which + will cost more time. + + Parameters + ---------- + msg: str + """ + self.errors.append(msg) + + def valid(self): + """Check whether the config meets all the constraints + + .. 
note:: + + This check should be called after instantiation of task, + because the ConfigEntity/ConfigSpace collects errors during instantiation + + Returns + ------- + valid: bool + whether the config meets all the constraints + """ + return not bool(self.errors) + + def is_index_valid(self, index): + """Checks if the index satisfies the multi_filter condition + + Parameters + ---------- + index: int + index from the range of the space + + Returns + ------- + valid: bool + whether the index meets all the constraints + """ + assert 0 <= index < self.range_length + if self._shared_filter is None: + return True + if self._shared_filter_cache is None: + self._make_shared_filter_cache() + return self._shared_filter_cache[index] + + def multi_filter(self, filter): # pylint: disable=redefined-builtin + """The filter can restrict combination of parameters in difference to the knob filter, + that restricts only single parameter + + Parameters + ---------- + filter: function + predicate with one argument (Callable[[int], bool]) + + .. note:: + + Using this filter causes additional restrictions on the use of __len__. + Normally, it define the count of valid indexes and the range of space, but when + multi_filter enabled, it requires to use __len__ for getting the count of valid + indexes or range_length for the range of space. 
It is recommended to use: + ``is_index_valid``, ``get_next_index``, ``get_rand_index`` to bypass the space + + Examples + -------- + >>> # Pre-requisites + >>> candidates = [[16, 64], [32, 32], [64, 16]] + >>> filter = lambda v: v.size[0] != 16 + >>> multi_filter = lambda e: (e["tile_x"].size[0] + e["tile_y"].size[0]) <= 64 + + >>> # Case 1 - without filtering + >>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates) + >>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates) + >>> # [('tile_x', [16, 64]), ('tile_y', [16, 64])],None,0 + >>> # [('tile_x', [32, 32]), ('tile_y', [16, 64])],None,1 + >>> # [('tile_x', [64, 16]), ('tile_y', [16, 64])],None,2 + >>> # [('tile_x', [16, 64]), ('tile_y', [32, 32])],None,3 + >>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,4 + >>> # [('tile_x', [64, 16]), ('tile_y', [32, 32])],None,5 + >>> # [('tile_x', [16, 64]), ('tile_y', [64, 16])],None,6 + >>> # [('tile_x', [32, 32]), ('tile_y', [64, 16])],None,7 + >>> # [('tile_x', [64, 16]), ('tile_y', [64, 16])],None,8 + + >>> # Case 2 - with filter + >>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates, + >>> filter=filter) + >>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates, + >>> filter=filter) + >>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,0 + >>> # [('tile_x', [64, 16]), ('tile_y', [32, 32])],None,1 + >>> # [('tile_x', [32, 32]), ('tile_y', [64, 16])],None,2 + >>> # [('tile_x', [64, 16]), ('tile_y', [64, 16])],None,3 + + >>> # Case 3 - with filter and multi_filter + >>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates, + >>> filter=filter) + >>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates, + >>> filter=filter) + >>> cfg.multi_filter(filter=multi_filter) + >>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,0 + """ + if self._collect: + 
self.clear_cache() + self._shared_filter = filter + + @property + def range_length(self): + """Length of the index range in the space""" + if self._range_length is None: + self._range_length = int(np.prod([len(x) for x in self.space_map.values()])) + return self._range_length + + @property + def dims(self): + """Dimensions in the space""" + if self._dims is None: + self._dims = [len(x) for x in self.space_map.values()] + return self._dims + + def subrange_length(self, start, end): + """Returns the number of valid indexes within the limited range from [start, end] + + Parameters + ---------- + start: int + start of subrange, inclusive + end: int + end of subrange, exclusive + + Returns + ------- + count: int + number of valid indexes + """ + assert 0 <= start <= end <= self.range_length + if self._shared_filter is None: + return end - start + if self._shared_filter_cache is None: + self._make_shared_filter_cache() + return self._shared_filter_cache[start:end].count(True) + + def get_rand_index(self, start=None, end=None, to_exclude=None): + """Returns a random valid index unlisted to exclusion + + Parameters + ---------- + start: int, optional + specifying at which position to start, inclusive + end: int, optional + specifying at which position to end, exclusive + to_exclude: list, optional + determines unsuitable values + + Returns + ------- + rand: int + random index in the space + + .. note:: + + Excluding all valid space indexes will lead to an infinite loop. 
+ + """ + start = start or 0 + end = end or self.range_length + while True: + index = randrange(start, end) + if self.is_index_valid(index) and index not in (to_exclude or []): + return index + + def get_next_index(self, index, n=1, start=None, end=None): + """Returns the nth valid next index or None if out of range + + Parameters + ---------- + index: int + specifying at which position to start, inclusive + n: int, optional + step by using to find the next index, for the opposite + direction a negative number should be used + start: list, optional + start of subrange, inclusive + end: list, optional + end of subrange, exclusive + + Returns + ------- + next: int + next index in the space + """ + assert n != 0 + start = start or 0 + end = end or self.range_length + if self._shared_filter is None: + index += n + if start <= index < end: + return index + return None + trend = 1 if n > 0 else -1 + counter = abs(n) + while counter != 0: + index += trend + if index < start or index >= end: + return None + if self.is_index_valid(index): + counter -= 1 + return index + + def clear_cache(self): + """Clears the cache of index validity""" + del self._shared_filter_cache + self._dims = None + self._length = None + self._range_length = None + self._shared_filter_cache = None + + def _make_shared_filter_cache(self): + def apply(t): + entities = OrderedDict() + for name, space in self.space_map.items(): + entities[name] = space[t % len(space)] + t //= len(space) + return bool(self._shared_filter(entities)) + + self._shared_filter_cache = tuple(apply(i) for i in range(self.range_length)) + self._length = self._shared_filter_cache.count(True) + + def point2knob(self, point): + """Convert point form (single integer) to knob (vector) + + Parameters + ---------- + point: int + point to convert + + Returns + ------- + knob: list + knob representation of the point + """ + knob = [] + for dim in self.dims: + knob.append(point % dim) + point //= dim + return knob + + def knob2point(self, 
knob): + """Convert knob form (vector) to point form (single integer) + + Parameters + ---------- + knob: list + knob to convert + + Returns + ------- + point: int + point of the knob representation + """ + point = 0 + for j, k in enumerate(knob): + point += int(np.prod(self.dims[:j])) * k + return point + + def sample_ints(self, m): + """ + Sample m different integer numbers from [0, self.range_length) without replacement + This function is an alternative of `np.random.choice` when self.range_length > 2 ^ 32, in + which case numpy does not work. + + Parameters + ---------- + m: int + The number of sampled int + + Returns + ------- + ints: an numpy array of size m + """ + assert m <= len(self) + vis = set() + while len(vis) < m: + new = randrange(0, self.range_length) + if self.is_index_valid(new): + vis.add(new) + return np.fromiter(vis, int, len(vis)) + + def random_walk(self, point): + """random walk as local transition + + Parameters + ---------- + point: int + index of the ConfigEntity + + Returns + ------- + new_point: int + new neighborhood index + """ + # transform to knob form + old_knob = self.point2knob(point) + new_knob = old_knob.copy() + new_point = self.knob2point(new_knob) + # mutate + while new_knob == old_knob or not self.is_index_valid(new_point): + from_i = np.random.randint(len(old_knob)) + to_v = np.random.randint(self.dims[from_i]) + new_knob[from_i] = to_v + new_point = self.knob2point(new_knob) + # transform to index form + return new_point + + def _add_new_transform(self, space_class, name, axes, policy, **kwargs): + """Add a new transform space in template""" + # if we do not have tuned info (_collect == True) but defined KNOB value + # for "default" scheduling before call of _add_new_transform, in this case + # no need to create new space and override previously pointed KNOB values + if kwargs.get("filter"): + self.clear_cache() + if self._collect and not (self.is_fallback and name in self._entity_map): + # convert schedule axis to space 
definition axis + axes = [x if isinstance(x, (VirtualAxis, Axis)) else self.axis(x) for x in axes] + + # add subspace (knob) + space = space_class(axes, policy, **kwargs) + self.space_map[name] = space + self._entity_map[name] = space[0] + return [Axis(space, i) for i in range(space.num_output)] + return [Axis(None, i) for i in range(space_class.get_num_output(axes, policy, **kwargs))] + + def __len__(self): + """Returns the number of valid indexes in the space""" + if self._shared_filter is None: + return self.range_length + if self._shared_filter_cache is None: + self._make_shared_filter_cache() + return self._length + + def get(self, index): + """Get a config entity with detailed parameters from this space + + Parameters + ---------- + index: int + index in the space + + Returns + ------- + config: ConfigEntity + config corresponds to the index + """ + if index < 0 or index >= self.range_length: + raise IndexError(f"Index out of range: size {self.range_length}, got index {index}") + if not self.is_index_valid(index): + raise IndexError( + f"Index does not correspond to the multi-filter condition, got index {index}. 
" + f"Use is_index_valid to pre-check" + ) + entities = OrderedDict() + t = index + for name, space in self.space_map.items(): + entities[name] = space[t % len(space)] + t //= len(space) + ret = ConfigEntity(index, self.code_hash, entities, self._constraints) + return ret + + def __iter__(self): + return self._entity_map.__iter__() + + def __getitem__(self, name): + """get the transform entity(knob) of this entity by name + do not use this to get a ConfigEntity of this space (should use ConfigSpace.get instead) + + Parameters + ---------- + name: str + name of the transform + """ + return self._entity_map[name] + + def __repr__(self): + res = f"ConfigSpace (len={len(self)}, range_length={self.range_length}, space_map=\n" + for i, (name, space) in enumerate(self.space_map.items()): + res += f" {i:2d} {name}: {space}\n" + return res + ")" + + +_ann_to_number = { + "none": 0, + "vec": 1, + "unroll": 2, + "blockIdx.x": 3, + "blockIdx.y": 4, + "blockIdx.z": 5, + "threadIdx.x": 6, + "threadIdx.y": 7, + "threadIdx.z": 8, + "vthread": 9, + "fuse": 10, +} + + +class ConfigEntity(ConfigSpace): + """A configuration with detailed parameters + + Parameters + ---------- + index: int + index of this config in space + code_hash: str + hash of schedule code + entity_map: dict + map name to transform entity + constraints : list + List of constraints + """ + + def __init__(self, index, code_hash, entity_map, constraints): + super(ConfigEntity, self).__init__() + self.index = index + self._collect = False + self._entity_map = entity_map + self._space_map = None + self._constraints = constraints + self.code_hash = code_hash + + def get_flatten_feature(self): + """flatten entities to a numerical one-dimensional feature vector + + Returns + ------- + fea: np.array + one dimensional float32 array + """ + fea = [] + for _, v in self._entity_map.items(): + if isinstance(v, SplitEntity): + fea.extend(v.size) + elif isinstance(v, ReorderEntity): + # use a naive way: directly copy the 
permutation + fea.extend(v.perm) + elif isinstance(v, AnnotateEntity): + # one-hot encoding + for ann in v.anns: + tmp = [0] * len(_ann_to_number) + tmp[_ann_to_number[ann]] = 1 + fea.extend(tmp) + elif isinstance(v, OtherOptionEntity): + fea.append(v.val) + return np.array(fea, dtype=np.float32) + + def get_other_option(self): + """ + Returns + ------- + other_option: dict + other tunable parameters (tunable parameters defined by `cfg.define_knob`) + """ + return {x: x.val for x in self._entity_map.values() if isinstance(x, OtherOptionEntity)} + + def to_json_dict(self): + """convert to a json serializable dictionary + + Return + ------ + json_dict: dict + a json serializable dictionary + """ + ret = {} + ret["index"] = int(self.index) + ret["code_hash"] = self.code_hash + entity_map = [] + for k, v in self._entity_map.items(): + if isinstance(v, SplitEntity): + entity_map.append((k, "sp", v.size)) + elif isinstance(v, ReorderEntity): + entity_map.append((k, "re", v.perm)) + elif isinstance(v, AnnotateEntity): + entity_map.append((k, "an", v.anns)) + elif isinstance(v, OtherOptionEntity): + entity_map.append((k, "ot", v.val)) + else: + raise RuntimeError("Invalid entity instance: " + v) + ret["entity"] = entity_map + return ret + + @staticmethod + def from_json_dict(json_dict): + """Build a ConfigEntity from json serializable dictionary + + Parameters + ---------- + json_dict: dict + Json serializable dictionary. This should be the return value + of :any:`to_json_dict`. 
+ + Returns + ------- + config: ConfigEntity + The corresponding config object + + """ + index = json_dict["index"] + code_hash = json_dict["code_hash"] + constraints = [] + entity_map = OrderedDict() + + for item in json_dict["entity"]: + key, knob_type, knob_args = item + if knob_type == "sp": + entity = SplitEntity(knob_args) + elif knob_type == "re": + entity = ReorderEntity(knob_args) + elif knob_type == "an": + entity = AnnotateEntity(knob_args) + elif knob_type == "ot": + entity = OtherOptionEntity(knob_args) + else: + raise RuntimeError("Invalid config knob type: " + knob_type) + entity_map[str(key)] = entity + + return ConfigEntity(index, code_hash, entity_map, constraints) + + def __repr__(self): + return f"{str(self._entity_map)[12:-1]},{self.code_hash},{self.index}" + + +class FallbackConfigEntity(ConfigSpace): + """The config entity created to support fallback""" + + def __init__(self): + super(FallbackConfigEntity, self).__init__() + self.is_fallback = True + + def fallback_split(self, name, constraints): + """Fallback a split knob + + Parameters + ---------- + name: str + name of the knob + constraints: List of int + The maximum tile size for every dimension. Value `-1` means no constraint. 
+ + Examples + -------- + If you use cfg.define_split('tile_0', 128, num_outputs=3), + Then cfg.fallback_split('tile_0', [-1, 8, 4]) will give you cfg['tile_0'].size = [4, 8, 4] + + If you use cfg.define_split('tile_0', 49, num_outputs=3), + Then cfg.fallback_split('tile_0', [-1, 8, 4]) will give you cfg['tile_0'].size = [7, 7, 1] + """ + space = self.space_map[name] + assert isinstance(space, SplitSpace) + assert len(constraints) == space.num_output + + # '-1' means no constraint + constraints = [x if x != -1 else 1e10 for x in constraints] + + entity = self._entity_map[name] + now = space.product + + for i in reversed(range(space.num_output)): + factors = get_factors(now) + + find = len(factors) - 1 + for j, f in enumerate(factors): + if f > constraints[i]: + find = j - 1 + break + + if find >= 0: + entity.size[i] = factors[find] + now //= factors[find] + else: + raise RuntimeError("Cannot find feasible fallback split entity for node: " + name) + + def fallback_with_reference_log(self, ref_log): + """A data driven fallback mechanism. + We use tuned parameters from TopHub as reference data. + For an unseen shape, we find the most similar tuned one from TopHub and + mimic its parameters. + Note that we are not matching by workload (e.g., input size, kernel size), + but instead matching by configuration space. The idea is that if two workloads have + similar configuration space, their optimal configurations are also likely to be similar. 
+ + Parameters + ---------- + ref_log: List of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult) + The reference log + """ + knob_names = [x for x in self.space_map.keys() if isinstance(self.space_map[x], SplitSpace)] + + # find best match config in reference data by matching tiling factors + factor_list = [] + for knob_name in knob_names: + factor_list.append(get_factors(self.space_map[knob_name].product)) + + best_match_cfg = None + best_match_score = 0 + for inp, _ in ref_log: + match_score = 0 + for i, knob_name in enumerate(knob_names): + factors = get_factors(int(np.prod(inp.config[knob_name].size))) + match_score += float(len(set(factor_list[i]).intersection(factors))) / len( + factor_list[i] + ) + + if match_score > best_match_score: + best_match_score, best_match_cfg = match_score, inp.config + + if best_match_cfg is None: + return + + # mimic its tiling strategy + for knob_name in knob_names: + constraint = list(best_match_cfg[knob_name].size) + constraint[0] = -1 + self.fallback_split(knob_name, constraint) + + # copy other knobs + for knob_name in self.space_map.keys(): + if not isinstance(self.space_map[knob_name], SplitSpace): + self._entity_map[knob_name] = best_match_cfg[knob_name] + + def __setitem__(self, name, entity): + """set the entity(knob) of by name + + Parameters + ---------- + name: str + name of the entity + entity: SplitEntity, ReorderEntity, AnnotateEntity, OtherOptionEntity + value of the entity + """ + self._entity_map[name] = entity + + def __repr__(self): + return f"{str(self._entity_map)[12:-1]},{self.code_hash}" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/task.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/task.py new file mode 100644 index 0000000000000000000000000000000000000000..575325c80e5bc9a7df730759a3f174b4f07599ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/task.py @@ -0,0 
+1,628 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-variable,not-callable +"""Definition of task function. + +Task can be constructed from tuple of func, args, and kwargs. +func is a state-less function, or a string that +registers the standard task. +""" +import functools + +import numpy as np + +from tvm import runtime +from tvm.ir import container +from tvm.target import Target +from tvm.te import placeholder, tensor +from tvm.tir import expr + + +from ..utils import get_const_int, get_const_tuple +from .dispatcher import ApplyConfig, DispatchContext +from .space import ConfigSpace + + +def _lookup_task(name): + task = TASK_TABLE.get(name) + if task is None: + # Unable to find the given task. This might be because we are + # creating a task based on a name that has not been imported. + # Rather than raising an exception here, we return a dummy + # task which cannot be invoked. + task = MissingTask(name) + return task + + +def serialize_args(args): + """serialize arguments of a topi function to a hashable tuple. 
+ + Parameters + ---------- + args: list of hashable or Tensor + """ + + def _encode(x): + if isinstance(x, tensor.Tensor): + return ("TENSOR", get_const_tuple(x.shape), x.dtype) + if isinstance(x, (tuple, list, container.Array)): + return tuple([_encode(a) for a in x]) + if isinstance(x, (str, int, float, expr.Var, expr.Any)): + return x + if isinstance(x, (expr.StringImm, expr.IntImm, expr.FloatImm)): + return x.value + if isinstance(x, runtime.container.String): + return str(x) + if x is None: + return None + raise RuntimeError( + f'Do not support type "{type(x)}" in argument. Consider to use' + f"primitive types or tvm.tir.Var only" + ) + + ret = [] + for t in args: + ret.append(_encode(t)) + return tuple(ret) + + +def deserialize_args(args): + """The inverse function of :code:`serialize_args`. + + Parameters + ---------- + args: list of hashable or Tensor + """ + ret = [] + for t in args: + if isinstance(t, tuple) and t[0] == "TENSOR": + ret.append(placeholder(shape=t[1], dtype=t[2])) + else: + ret.append(t) + return ret + + +def args_to_workload(args, task_name=None): + """Convert argument list to hashable workload tuple. + This function will convert list to tuple, tvm node to python value and + flatten te.tensor.Tensor to a tuple + + Parameters + ---------- + task_name : str + The AutoTVM task name + + args : list of args + The arguments to the function + + Returns + ------- + ret: hashable + The hashable value + """ + return (task_name,) + serialize_args(args) if task_name is not None else serialize_args(args) + + +class Task(object): + """A Tunable Task + + Parameters + ---------- + name: str + The name of the task. 
+ args: Tuple + Positional argument of func + """ + + def __init__(self, name, args): + self.name = name + self.args = args + self.kwargs = {} # currently unused + + # init null config space + self.config_space = None + self.func = _lookup_task(name) + + # auxiliary info, available after `init_space` is called + self.flop = None + self.target = None + self.target_host = None + + @property + def workload(self): + return (self.name,) + serialize_args(self.args) + + def instantiate(self, config): + """Instantiate this task function (template) with a config. + Returns corresponding schedule. + + Parameters + ---------- + config: template.ConfigEntity + parameter config for this template + + Returns + ------- + sch: tvm.te.schedule.Schedule + The tvm schedule + arg_bufs: Array of te.tensor.Tensor + The input/output buffers + """ + config.flop = 0 + with ApplyConfig(config): + sch, arg_bufs = self.func(*self.args, **self.kwargs) + if not self.flop: + config.flop = config.flop or compute_flop(sch) + self.flop = config.flop + return sch, arg_bufs + + def __getstate__(self): + # custom pickle implementation is required for + # some unpickable local task functions. + # So we only pickle the name of the function + # and restore the function by name when unpickling it. 
+ import cloudpickle # pylint: disable=import-outside-toplevel + + self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host) + return { + "name": self.name, + "args": self.args, + "kwargs": self.kwargs, + "config_space": self.config_space, + "flop": self.flop, + "target": self.target, + "target_host": self.target_host, + "func": cloudpickle.dumps(self.func), + } + + def __setstate__(self, state): + import cloudpickle # pylint: disable=import-outside-toplevel + + self.name = state["name"] + self.args = state["args"] + self.kwargs = state["kwargs"] + self.config_space = state["config_space"] + self.func = cloudpickle.loads(state["func"]) + self.flop = state["flop"] + self.target, self.target_host = Target.canon_target_and_host( + state["target"], state["target_host"] + ) + + def __repr__(self): + return "Task(func_name=%s, args=%s, kwargs=%s, workload=%s)" % ( + self.name, + self.args, + self.kwargs, + self.workload, + ) + + +TASK_TABLE = {} + + +class TaskTemplate(object): + """ + Task template is used to creates a tunable AutoTVM task. + + It can be defined by a pair of compute and schedule function using + `_register_task_compute` and `_register_task_schedule`, + or by a customized task creation function that is more flexible using + `_register_customized_task`. 
+ + Note that when customized func is registered, compute and schedule function + will be ignored + """ + + def __init__(self): + self.fcompute = None + self.fschedule = None + self.fcustomized = None + + def __call__(self, *args, **kwargs): + args = deserialize_args(args) + if self.fcustomized is None: + return self._default_func(*args, **kwargs) + assert callable(self.fcustomized) + return self.fcustomized(*args, **kwargs) + + def _default_func(self, *args, **kwargs): + assert callable(self.fcompute) and callable(self.fschedule) + out = self.fcompute(*args, **kwargs) + arg_bufs = [out] + self._get_inputs(out) + s = self.fschedule([out]) + return s, arg_bufs + + @staticmethod + def _get_inputs(out): + inputs = [] + queue = [out] + hash_set = set() + while queue: + t = queue.pop(0) + if isinstance(t.op, tensor.PlaceholderOp): + inputs.append(t) + else: + input_tensors = [t for t in t.op.input_tensors if t not in hash_set] + queue.extend(input_tensors) + hash_set.update(input_tensors) + return inputs + + +class MissingTask(TaskTemplate): + """ + Dummy task template for a task lookup which cannot be resolved. + This can occur if the task being requested from _lookup_task() + has not been imported in this run. + """ + + def __init__(self, taskname: str): + super().__init__() + self._taskname = taskname + + def __call__(self, *args, **kwargs): + raise RuntimeError( + f"Attempting to invoke a missing task {self._taskname}." + "It is possible that the function is registered in a " + "Python module that is not imported in this run, or the log is out-of-date." + ) + + +def _register_task_compute(name, func=None): + """Register compute function to autotvm task + + Parameters + ---------- + name: str + The task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. 
+ + Returns + ------- + decorator: callable + A decorator + """ + + def _do_reg(f): + if name not in TASK_TABLE: + TASK_TABLE[name] = TaskTemplate() + tmpl = TASK_TABLE[name] + if tmpl.fcompute is not None: + raise ValueError(f"Compute is already registered in autoTVM task {name}") + tmpl.fcompute = f + return f + + if func: + return _do_reg(func) + return _do_reg + + +def _register_task_schedule(name, func=None): + """Register schedule function to autotvm task + + Parameters + ---------- + name: str + The task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. + + Returns + ------- + decorator: callable + A decorator + """ + + def _do_reg(f): + if name not in TASK_TABLE: + TASK_TABLE[name] = TaskTemplate() + tmpl = TASK_TABLE[name] + if tmpl.fschedule is not None: + raise ValueError(f"Schedule is already registered in autoTVM task {name}") + tmpl.fschedule = f + return f + + if func: + return _do_reg(func) + return _do_reg + + +def _register_customized_task(name, func=None): + """Register a customized function to AutoTVM task. + + Parameters + ---------- + name: str + The task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. + + Returns + ------- + decorator: callable + A decorator + """ + + def _do_reg(f): + if name not in TASK_TABLE: + TASK_TABLE[name] = TaskTemplate() + tmpl = TASK_TABLE[name] + if tmpl.fcustomized is not None: + raise ValueError(f"Customized func is already registered in autoTVM task {name}") + tmpl.fcustomized = f + return f + + if func: + return _do_reg(func) + return _do_reg + + +def template(task_name, func=None): + """Decorate a function as a tunable schedule template. + + Parameters + ---------- + task_name: str + The task name + + func: None or callable + A callable template function. + If it is None, return a decorator. + If is callable, decorate this function. 
+ + Returns + ------- + func: callable + The decorated function + + Examples + -------- + The following code is a tunable template for a blocked matrix multiplication + + .. code-block:: python + + @autotvm.template("matmul") + def matmul(N, L, M, dtype): + A = te.placeholder((N, L), name='A', dtype=dtype) + B = te.placeholder((L, M), name='B', dtype=dtype) + + k = te.reduce_axis((0, L), name='k') + C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name='C') + s = te.create_schedule(C.op) + + # schedule + y, x = s[C].op.axis + k = s[C].op.reduce_axis[0] + + ##### define space begin ##### + cfg = autotvm.get_config() + cfg.define_split("tile_y", y, num_outputs=2) + cfg.define_split("tile_x", x, num_outputs=2) + ##### define space end ##### + + # schedule according to config + yo, yi = cfg["tile_y"].apply(s, C, y) + xo, xi = cfg["tile_x"].apply(s, C, x) + + s[C].reorder(yo, xo, k, yi, xi) + + return s, [A, B, C] + """ + + def _decorate(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + assert not kwargs, "Do not support kwargs in template function call" + workload = args_to_workload(args, task_name) + tgt = Target.current() + cfg = DispatchContext.current.query(tgt, workload) + with ApplyConfig(cfg): + return f(*args, **kwargs) + + _register_customized_task(task_name, f) + return wrapper + + if func: + return _decorate(func) + return _decorate + + +def create(task_name, args, target, target_host=None): + """Create a tuning task and initialize its search space + + Parameters + ---------- + task_name : str + The AutoTVM task name + args : List + Positional arguments + target : Target + The compilation target + target_host: Target, optional + The compilation target for host side + + Returns + ------- + tsk: Task + a task object + """ + args = serialize_args(args) + ret = Task(task_name, args) + + target, target_host = Target.canon_target_and_host(target, target_host) + + # init config space + ret.config_space = ConfigSpace() + + ctx = 
ApplyConfig(ret.config_space) + with ctx: + with target: + sch, _ = ret.func(*args) + ret.config_space.code_hash = getattr(sch, "code_hash", None) + + ret.flop = ret.config_space.flop or compute_flop(sch) + ret.target = target + ret.target_host = target_host + + return ret + + +def get_config(): + """Get current config object + + Returns + ------- + cfg: ConfigSpace or ConfigEntity + The current config + """ + tgt = Target.current(allow_none=True) + return DispatchContext.current.query(tgt, None) + + +class FlopCalculationError(RuntimeError): + """Error happens when estimating FLOP for a compute op""" + + +def compute_flop(sch): + """Calculate number of FLOP (floating number operations) of the compute ops in a schedule + + Parameters + ---------- + sch: tvm.te.schedule.Schedule + schedule + + Returns + ------- + flop: int + number of FLOP in this schedule + """ + + def _prod_length(axes): + """compute product of the lengths of a list of axes""" + try: + num_iter = int(np.prod([get_const_int(axis.dom.extent) for axis in axes])) + except ValueError: + raise FlopCalculationError("The length of axis is not constant. 
") + return num_iter + + def _count_flop(exp): + """compute flop for a single expression""" + if isinstance(exp, expr.Reduce): + num_iter = _prod_length(exp.axis) + combiner = exp.combiner.result + source = exp.source + if len(combiner) != 1: + raise FlopCalculationError("Found multiple output in the combiner of reduce op") + if len(source) != 1: + raise FlopCalculationError("Found multiple output in the source of reduce op") + return num_iter * (_count_flop(combiner[0]) + _count_flop(source[0])) + if isinstance(exp, (expr.FloatImm, expr.IntImm)): + return 0 + if isinstance(exp, expr.Cast): + return _count_flop(exp.value) + if isinstance(exp, expr.Var): + return 0 + if isinstance( + exp, + ( + expr.Add, + expr.Sub, + expr.Mul, + expr.Div, + expr.Mod, + expr.FloorDiv, + expr.FloorMod, + expr.Max, + expr.Min, + expr.EQ, + expr.NE, + expr.LT, + expr.LE, + expr.GT, + expr.GE, + expr.And, + expr.Or, + expr.Not, + ), + ): + base = 1 + + if isinstance(exp, expr.Not): # unary + return base + _count_flop(exp.a) + + return base + _count_flop(exp.a) + _count_flop(exp.b) + if isinstance(exp, expr.Select): + return _count_flop(exp.condition) + max( + _count_flop(exp.true_value), _count_flop(exp.false_value) + ) + if isinstance(exp, expr.ProducerLoad): + # Ignore flops from indexing expressions. + return 0 + + if isinstance(exp, expr.Call): + return sum([_count_flop(x) for x in exp.args]) + + raise FlopCalculationError("Found unsupported operator in the compute expr") + + def traverse(ops): + """accumulate flops""" + ret = 0 + for op in ops: + if isinstance(op, tensor.ComputeOp): + num_element = _prod_length(op.axis) + + body = op.body + if len(body) != 1: + raise FlopCalculationError("Found multiple output in the compute") + exp = body[0] + + ret += num_element * _count_flop(exp) + ret += traverse([t.op for t in op.input_tensors]) + + elif isinstance(op, tensor.PlaceholderOp): + pass + else: + raise FlopCalculationError( + f"{op.name} is not supported by autotvm. 
" + "Only support te.compute currently. " + "Other ops like tvm.te.scan/te.extern is not supported" + ) + return ret + + try: + ret = traverse(sch.outputs) + except FlopCalculationError as exc: + raise RuntimeError( + "FLOP estimator fails for this operator. Error msg: " + + str(exc) + + ". Please use `cfg.add_flop` to manually set " + "FLOP for this operator" + ) + + if ret == 0: + raise RuntimeError( + "Cannot find float number operation in this operator. " + "Please use `cfg.add_flop` to manually set " + "FLOP for this operator" + ) + return ret diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/topi_integration.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/topi_integration.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f3636edbbe8c905a30120e15b0422a4c89c9e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/task/topi_integration.py @@ -0,0 +1,273 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-variable,invalid-name,unused-argument +""" +Decorators for registering tunable templates to TOPI. 
+ +These decorators can make your simple implementation be able to use different configurations +for different workloads. +Here we directly use all arguments to the TOPI call as "workload", so make sure all the arguments +(except tvm.te.Tensor) in you calls are hashable. For tvm.te.Tensor, +we will serialize it to a hashable tuple. + +See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage. +""" +import functools + +import tvm.te._ffi_api +from tvm.target import Target +from tvm.te import tensor + +from .task import ( + args_to_workload, + serialize_args, + DispatchContext, + _register_task_compute, + _register_task_schedule, +) + + +# Task extractor for relay program +class TaskExtractEnv: + """Global environment for extracting tuning tasks from graph""" + + current = None + registered = None + + def __init__(self, allow_duplicate=False): + self.allow_duplicate = allow_duplicate + self.task_collection = [] + self.wanted_relay_ops = None + self.modified_funcs = [] + self.tracing = False + + def __enter__(self): + self.task_collection = [] + self.tracing = True + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.tracing = False + + def reset(self, wanted_relay_ops=None): + """Reset task collections + + Parameters + ---------- + wanted_relay_ops: List of tvm.ir.Op + The relay ops to be extracted + """ + self.task_collection = [] + self.wanted_relay_ops = wanted_relay_ops + + def add_task(self, task_name, args): + """Add AutoTVM task + + Parameters + ---------- + task_name: str + AutoTVM task name. + + args: tuple + Arguments to the TOPI function. 
+ """ + key = (task_name, serialize_args(args)) + if self.allow_duplicate or key not in self.task_collection: + self.task_collection.append(key) + + def get_tasks(self): + """Get collected tasks + + Returns + ------- + tasks: List of tuple(name, args) + A list of tasks extracted from the graph + """ + return self.task_collection + + @staticmethod + def get(allow_duplicate=False): + """Get the single instance of TaskExtractEnv + + Parameters + ---------- + allow_duplicate : boolean + Whether to fetch all workloads in the network, + even though some of them are the same. This is + useful for graph tuning. + + Returns + ------- + env: TaskExtractEnv + The single instance of TaskExtractEnv + """ + if not TaskExtractEnv.current: + TaskExtractEnv.current = TaskExtractEnv(allow_duplicate) + else: + TaskExtractEnv.current.allow_duplicate = allow_duplicate + return TaskExtractEnv.current + + +def register_topi_compute(task_name, func=None): + """Register a tunable template for a topi compute function. + + The registration will wrap this topi compute to take `cfg` as the first argument, + followed by the original argument list. It uses all its argument as workload and + stores this "workload" to its final ComputeOp, which can be used to reconstruct + "workload" in the following topi_schedule call. + + Parameters + ---------- + task_name: str + The AutoTVM task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. + + Returns + ------- + decorator: callable + A decorator + + Examples + -------- + See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage. 
+ """ + + def _decorate(topi_compute): + @functools.wraps(topi_compute) + @_register_task_compute(task_name) + def wrapper(*args, **kwargs): + """wrapper function for topi compute""" + assert not kwargs, "Do not support kwargs in template function call" + task_env = TaskExtractEnv.current + if task_env is not None and task_env.tracing: + task_env.add_task(task_name, args) + workload = args_to_workload(args, task_name) + tgt = Target.current() + cfg = DispatchContext.current.query(tgt, workload) + node = topi_compute(cfg, *args) + + # attach workload to return op + op = node.op + attrs = {} + for k, v in node.op.attrs.items(): + attrs[k] = v + attrs["workload"] = workload + if isinstance(op, tensor.ComputeOp): + op = tvm.te._ffi_api.ComputeOp(op.name, op.tag, attrs, op.axis, op.body) + elif isinstance(op, tensor.ExternOp): + op = tvm.te._ffi_api.ExternOp( + op.name, + op.tag, + attrs, + op.inputs, + op.input_placeholders, + op.output_placeholders, + op.body, + ) + else: + raise RuntimeError("Unsupported op type: " + str(type(op))) + + if isinstance(node, tensor.Tensor): + return op.output(0) + return [op.output(i) for i in range(len(node))] + + return wrapper + + if func: + return _decorate(func) + return _decorate + + +def register_topi_schedule(task_name, func=None): + """Register a tunable template for a topi schedule function. + + The registration will wrap this topi schedule to take `cfg` as the first argument, + followed by the original argument list. + + Note that this function will try to find "workload" from all the ComputeOp in the input. + You can attach "workload" to your compute op by using :any:`register_topi_compute`. + + The task name has to be the same as that of the corresponding topi compute function. + + Parameters + ---------- + task_name: str + The AutoTVM task name + + func: None or callable + If it is None, return a decorator. + If is callable, decorate this function. 
+ + Returns + ------- + decorator: callable + A decorator + + Examples + -------- + See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage. + """ + + def _decorate(topi_schedule): + @functools.wraps(topi_schedule) + @_register_task_schedule(task_name) + def wrapper(outs, *args, **kwargs): + """wrapper function for topi schedule""" + workload = get_workload(outs, task_name) + if workload is None: + raise RuntimeError( + f"Cannot find TOPI workload {task_name}. " + "Is it registered with `register_topi_compute`?" + ) + tgt = Target.current() + cfg = DispatchContext.current.query(tgt, workload) + return topi_schedule(cfg, outs, *args, **kwargs) + + return wrapper + + if func: + return _decorate(func) + return _decorate + + +def get_workload(outs, task_name=None): + """Retrieve the workload from outputs""" + visited = set() + + def traverse(tensors): + """traverse all ops to find attached workload""" + for t in tensors: + op = t.op + if op in visited: + continue + visited.add(op) + wkl = traverse(op.input_tensors) + if wkl is not None: + return wkl + + if "workload" in op.attrs: + ret = args_to_workload(op.attrs["workload"]) + if task_name is None or ret[0] == task_name: + return ret + return None + + outs = [outs] if isinstance(outs, tensor.Tensor) else outs + return traverse(outs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..972d0cbaae5c00448f1fe44d38f85c7e1d3b99e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Testing utilities for autotvm""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6f5f3a56932edcbb5fdc261e5f36c23d57069b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__pycache__/tune_relay.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__pycache__/tune_relay.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64612791f0fc41a01de67f3b9b828a5be59688df Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/__pycache__/tune_relay.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/tune_relay.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/tune_relay.py new file mode 100644 index 0000000000000000000000000000000000000000..96e42fbea090641f01a907494eb82ad870cf0cf2 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/testing/tune_relay.py @@ -0,0 +1,227 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=missing-docstring +import argparse +import json +import os +import warnings +from distutils.util import strtobool + +import tvm +from tvm import autotvm +from tvm import meta_schedule as ms +from tvm import relay +from tvm.autotvm.graph_tuner import DPTuner +from tvm.autotvm.tuner import XGBTuner +from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc +from tvm.meta_schedule.testing.relay_workload import get_network +from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data +from tvm.support import describe + + +def _parse_args(): + args = argparse.ArgumentParser() + args.add_argument( + "--workload", + type=str, + required=True, + help="The name of the workload to tune. Supported models: " + "https://github.com/apache/tvm/blob/main/python/tvm/meta_schedule/testing/relay_workload.py#L303-L322", # pylint: disable=line-too-long + ) + args.add_argument( + "--input-shape", + type=str, + required=True, + help="The input shape of the workload. 
Example: '[1, 3, 224, 224]'", + ) + args.add_argument( + "--target", + type=str, + required=True, + help="The target device to tune. " + "Example: 'aws/cpu/c5.9xlarge', 'nvidia/nvidia-v100', 'nvidia/geforce-rtx-3090'", + ) + args.add_argument( + "--num-trials", + type=int, + required=True, + help="The number of trials per kernel. Example: 800", + ) + args.add_argument( + "--rpc-host", + type=str, + required=True, + help="The host address of the RPC tracker. Example: 192.168.6.66", + ) + args.add_argument( + "--rpc-port", type=int, required=True, help="The port of the RPC tracker. Example: 4445" + ) + args.add_argument( + "--rpc-key", type=str, required=True, help="The key of the RPC tracker. Example: '3090ti'" + ) + args.add_argument( + "--work-dir", + type=str, + required=True, + help="The working directory to store the tuning logs. Example: '/tmp/tune_relay'", + ) + args.add_argument( + "--layout", + type=str, + default=None, + help="The layout of the workload. Example: 'NCHW', 'NHWC'", + ) + args.add_argument("--cache-dir", type=str, default=None) + args.add_argument("--number", type=int, default=3) + args.add_argument("--repeat", type=int, default=1) + args.add_argument("--min-repeat-ms", type=int, default=100) + args.add_argument( + "--cpu-flush", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + required=True, + ) + args.add_argument( + "--graph-tuner", + type=lambda x: bool(strtobool(x)), + help="example: True / False", + required=True, + ) + args.add_argument( + "--backend", type=str, choices=["graph", "vm"], help="example: graph / vm", required=True + ) + parsed = args.parse_args() + parsed.target = tvm.target.Target(parsed.target) + parsed.input_shape = json.loads(parsed.input_shape) + parsed.rpc_config = ms.runner.RPCConfig( + tracker_host=parsed.rpc_host, + tracker_port=parsed.rpc_port, + tracker_key=parsed.rpc_key, + session_timeout_sec=600, + ) + return parsed + + +ARGS = _parse_args() + + +def main(): + if ARGS.target.kind.name 
!= "llvm" and ARGS.graph_tuner: + raise ValueError("GraphTuner only supports llvm target") + if ARGS.target.kind.name != "llvm" and ARGS.cpu_flush: + raise ValueError("cpu_flush only supports llvm target") + if ARGS.target.kind.name == "llvm" and not ARGS.cpu_flush: + warnings.warn("cpu_flush is not enabled for llvm target") + + log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json") + graph_opt_sch_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}_graph_opt.log") + measure_option = autotvm.measure_option( + builder=autotvm.LocalBuilder(), + runner=autotvm.RPCRunner( + key=ARGS.rpc_key, + host=ARGS.rpc_host, + port=ARGS.rpc_port, + number=ARGS.number, + repeat=ARGS.repeat, + min_repeat_ms=ARGS.min_repeat_ms, + enable_cpu_cache_flush=ARGS.cpu_flush, + ), + ) + describe() + print(f"Workload: {ARGS.workload}") + mod, params, (input_name, input_shape, input_dtype) = get_network( + ARGS.workload, ARGS.input_shape, layout=ARGS.layout, cache_dir=ARGS.cache_dir + ) + input_info = [{"name": input_name, "shape": input_shape, "dtype": input_dtype}] + input_data = { + item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in input_info + } + for item in input_info: + print(f" input_name : {item['name']}") + print(f" input_shape: {item['shape']}") + print(f" input_dtype: {item['dtype']}") + + with ms.Profiler() as profiler: + with ms.Profiler.timeit("TaskExtraction"): + # extract workloads from relay program + tasks = autotvm.task.extract_from_program( + mod["main"], + target=ARGS.target, + params=params, + ops=( + relay.op.get("nn.conv2d"), + relay.op.get("nn.conv3d"), + relay.op.get("nn.conv2d_transpose"), + relay.op.get("nn.dense"), + relay.op.get("nn.batch_matmul"), + ), + ) + for i, task in enumerate(tasks): + print(f"Task {i} {task.name}: {task}") + + with ms.Profiler.timeit("Tuning"): + if ARGS.num_trials > 0: + for i, task in enumerate(tasks): + prefix = f"[Task {i + 1:2d}/{len(tasks):2d}] " + tuner_obj = XGBTuner(task, loss_type="reg") + 
n_trial = min(len(task.config_space), ARGS.num_trials) + tuner_obj.tune( + n_trial=n_trial, + early_stopping=800, + measure_option=measure_option, + callbacks=[ + autotvm.callback.progress_bar(n_trial, prefix=prefix), + autotvm.callback.log_to_file(log_file), + ], + ) + if ARGS.graph_tuner: + executor = DPTuner( + graph=mod["main"], + input_shapes={input_name: input_shape}, + records=log_file, + target_ops=[relay.op.get("nn.conv2d")], + target=ARGS.target, + ) + executor.benchmark_layout_transform(min_exec_num=1000) + executor.run() + executor.write_opt_sch2record_file(graph_opt_sch_file) + + relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend] + with ms.Profiler.timeit("PostTuningCompilation"): + if ARGS.graph_tuner: + ctx = autotvm.apply_graph_best(graph_opt_sch_file) + else: + ctx = autotvm.apply_history_best(log_file) + with ctx: + print("compile...") + with tvm.transform.PassContext(opt_level=3): + lib = relay_build(mod, target=ARGS.target, params=params) + print("Tuning Time:") + print(profiler.table()) + + run_module_via_rpc( + rpc_config=ARGS.rpc_config, + lib=lib, + dev_type=ARGS.target.kind.name, + args=input_data, + continuation=create_timer(ARGS.backend), + backend=ARGS.backend, + ) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tophub.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tophub.py new file mode 100644 index 0000000000000000000000000000000000000000..3cbb7ff0e10376a8a5b6583267dbab8a47d1266d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tophub.py @@ -0,0 +1,251 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: skip-file +""" +TopHub: Tensor Operator Hub +To get the best performance, we typically need auto-tuning for the specific devices. +TVM releases pre-tuned parameters in TopHub for some common networks and hardware targets. +TVM will download these parameters for you when you call relay.build. +""" + +import logging +from os import getenv +import sys +from pathlib import Path +from tvm.ir.container import Array + +from .task import ApplyHistoryBest +from ..target import Target +from ..contrib.download import download +from .record import load_from_file +from .utils import EmptyContext + +# environment variable to read TopHub location +AUTOTVM_TOPHUB_LOC_VAR = "TOPHUB_LOCATION" + +# default location of TopHub +AUTOTVM_TOPHUB_DEFAULT_LOC = "https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub" + +# value of AUTOTVM_TOPHUB_LOC_VAR to specify to not read from TopHub +AUTOTVM_TOPHUB_NONE_LOC = "NONE" + +# root path to store TopHub files +AUTOTVM_TOPHUB_ROOT_PATH = Path(Path("~").expanduser(), ".tvm", "tophub") + +# the version of each package +PACKAGE_VERSION = { + "arm_cpu": "v0.08", + "llvm": "v0.04", + "cuda": "v0.10", + "rocm": "v0.05", + "opencl": "v0.04", + "mali": "v0.06", + "intel_graphics": "v0.02", + "vta": "v0.10", + "amd_apu": "v0.01", + "adreno": "v0.01", +} + +logger = logging.getLogger("autotvm") + + +def _alias(name): + """convert alias for some packages""" + table = { 
+ "vtacpu": "vta", + "webgpu": "opencl", + "vulkan": "opencl", + "nvptx": "cuda", + "amd_apu": "amd_apu", + "adreno": "adreno", + } + return table.get(name, name) + + +def _get_tophub_location(): + location = getenv(AUTOTVM_TOPHUB_LOC_VAR, None) + return AUTOTVM_TOPHUB_DEFAULT_LOC if location is None else location + + +def context(target, extra_files=None): + """Return the dispatch context with pre-tuned parameters. + This function will load the corresponding *.log files in AUTOTVM_TOPHUB_ROOT_PATH. + If cannot find them, it will download them from TopHub github repo. + Users can also add their own files in argument `extra_files`. + + Parameters + ---------- + target: Target or List of Target + The compilation targets + extra_files: list of str, optional + Extra log files to load + """ + tophub_location = _get_tophub_location() + if tophub_location == AUTOTVM_TOPHUB_NONE_LOC: + return EmptyContext() + + best_context = ApplyHistoryBest([]) + + targets = target if isinstance(target, (Array, list, tuple)) else [target] + + for tgt in targets: + if isinstance(tgt, str): + tgt = Target(tgt) + + # The TOPHUB file names rely on Target's device or kind. Both these types of + # information exist in Target.keys, but rules of filling this filed is not explicitly + # defined, we are afraid to rely only on Target.keys. 
At the same time Target.device + # is filled only if device was pointed explicitly in target string, that is not mandatory + # and in some cases we need to get information about device from Target.keys + # In priority order we verify: + # 1) Target.device + # 2) Target.keys + # 3) Target.kind + possible_names = [] + device = tgt.attrs.get("device", "") + if device != "": + possible_names.append(_alias(device)) + possible_names.extend(tgt.keys) + possible_names.append(tgt.kind.name) + + all_packages = list(PACKAGE_VERSION.keys()) + for name in possible_names: + name = _alias(name) + if name in all_packages: + if not check_backend(tophub_location, name): + continue + + filename = f"{name}_{PACKAGE_VERSION[name]}.log" + best_context.load(Path(AUTOTVM_TOPHUB_ROOT_PATH, filename)) + break # only load one file to avoid some fallback template mismatch problem + + if extra_files: + for filename in extra_files: + best_context.load(filename) + + return best_context + + +def check_backend(tophub_location, backend): + """Check whether have pre-tuned parameters of the certain target. + If not, will download it. + + Parameters + ---------- + backend: str + The name of backend. + + Returns + ---------- + success: bool + Whether the check is successful. 
+ """ + backend = _alias(backend) + assert backend in PACKAGE_VERSION, f'Cannot find backend "{backend}" in TopHub' + + version = PACKAGE_VERSION[backend] + package_name = f"{backend}_{version}.log" + if Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name).is_file(): + return True + + # pylint: disable=import-outside-toplevel + if sys.version_info >= (3,): + import urllib.request as urllib2 + else: + import urllib2 + try: + download_package(tophub_location, package_name) + return True + except urllib2.URLError as e: + logging.warning("Failed to download tophub package for %s: %s", backend, e) + return False + + +def download_package(tophub_location, package_name): + """Download pre-tuned parameters of operators for a backend + + Parameters + ---------- + tophub_location: str + The location to download TopHub parameters from + + package_name: str + The name of package + """ + rootpath = Path(AUTOTVM_TOPHUB_ROOT_PATH) + rootpath.mkdir(parents=True, exist_ok=True) + + download_url = f"{tophub_location}/{package_name}" + logger.info("Download pre-tuned parameters package from %s", download_url) + download(download_url, Path(rootpath, package_name), overwrite=True) + + +# global cache for load_reference_log +REFERENCE_LOG_CACHE = {} + + +def load_reference_log(backend, model, workload_name): + """Load reference log from TopHub to support fallback in template. + Template will use these reference logs to choose fallback config. + + Parameters + ---------- + backend: str + The backend name + model: str + The name of the device model + workload_name: str + The name of the workload. 
(The first item in the workload tuple) + """ + + backend = _alias(backend) + if backend not in PACKAGE_VERSION: + return [] + version = PACKAGE_VERSION[backend] + package_name = f"{backend}_{version}.log" + filename = Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name) + + global REFERENCE_LOG_CACHE + key = (backend, model, workload_name) + + if key not in REFERENCE_LOG_CACHE: + tmp = [] + # If TOPHUB_LOCATION is not AUTOTVM_TOPHUB_NONE_LOC, + # Download the config file from tophub if not exists. + if not Path(filename).exists(): + tophub_location = _get_tophub_location() + if tophub_location != AUTOTVM_TOPHUB_NONE_LOC: + download_package(tophub_location, package_name) + if Path(filename).is_file(): # in case download failed + find = False + inp = None + counts = {} + for inp, res in load_from_file(filename): + counts[inp.target.model] = counts.get(inp.target.model, 0) + 1 + if model == inp.target.model: + find = True + break + # if device model is not find, use the device model with the most tuned workloads + if not find and counts: + model = max(counts.items(), key=lambda k: k[1])[0] + + for inp, res in load_from_file(filename): + if model == inp.target.model and inp.task.workload[0] == workload_name: + tmp.append((inp, res)) + REFERENCE_LOG_CACHE[key] = tmp + + return REFERENCE_LOG_CACHE[key] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f71d5bf51bd6d5f6cd823c09a820e9d7cfcd34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__init__.py @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +A tuner takes a task as input. It proposes some promising :any:`ConfigEntity` +in the :any:`ConfigSpace` and measure them on the real hardware. Then it +proposed the next batch of :any:`ConfigEntity` according to the measure results. +This tuning loop is repeated. +""" + +from . import callback +from .ga_tuner import GATuner +from .index_based_tuner import GridSearchTuner, RandomTuner +from .tuner import Tuner +from .xgboost_tuner import XGBTuner +from .droplet_tuner import DropletTuner diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28a522762864e7021d0fed092150638d57a36d23 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac3eae03130e4a9f66933c3d21dc6ee766bda259 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/callback.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/callback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c79c40919d11cb753f2de770938ada092bce6743 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/callback.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/callback.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/callback.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a44b29336e5d3e48c3f0eee8657293a9238773a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/callback.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/droplet_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/droplet_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fc591ed6355b9dffd2cd851544326b9323f6a56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/droplet_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/droplet_tuner.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/droplet_tuner.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f2fd3ff373595783f6919d1816029c3c31d19a19 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/droplet_tuner.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/ga_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/ga_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f8b0d1ae58ca29a1a189f4bb301cd81c2f54a10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/ga_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/ga_tuner.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/ga_tuner.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b87666ee0bc49fab83614cc89963afa267abe408 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/ga_tuner.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/index_based_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/index_based_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f6096cdf1bde4435462903bbf858b1d2bbc26df Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/index_based_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/index_based_tuner.cpython-38.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/index_based_tuner.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b2fafc83c84ead8ec85919c0386caba8591b25c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/index_based_tuner.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/metric.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/metric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e63c1e1ae133bd7554787c3e32ee005e915ca3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/metric.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/metric.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/metric.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bc95d66ca248f8e84622e9c156e0c5d7c513a32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/metric.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/model_based_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/model_based_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..409520fd3cd751c93fb89853778c12e98710d392 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/model_based_tuner.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/model_based_tuner.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/model_based_tuner.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65bb812f0ba7197838a455c94dfb7987985126a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/model_based_tuner.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/sa_model_optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/sa_model_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9587a772e1f530f34149932ad5342c1090376470 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/sa_model_optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/sa_model_optimizer.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/sa_model_optimizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63b114b66562fc4e2183b300edeb17dcb931d34a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/sa_model_optimizer.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8d5ad7b5911e3f1604c6fbfb4b01708c6d362c4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/tuner.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/tuner.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1def001513688f0a73d6198c4003eb546b4b82f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/tuner.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_cost_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_cost_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e651801f431c14978cbb8f132b52661f100e952 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_cost_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_cost_model.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_cost_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0e3062440ec5d07a1893aab0615acd8d6b8a23b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_cost_model.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_tuner.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dacf39b19f07b33a2bf4b7fb7e2c6b4bb3239104 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_tuner.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_tuner.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1898e23def261cee2d825076ae13bf53ecc11b02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/__pycache__/xgboost_tuner.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/callback.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/callback.py new file mode 100644 index 0000000000000000000000000000000000000000..40ee24e077b4501bb8b980041328d4ce487bccfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/callback.py @@ -0,0 +1,180 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=consider-using-enumerate,invalid-name +"""Namespace of callback utilities of AutoTVM""" +import sys +import time +import logging + +import numpy as np + +from .. import record +from ..utils import format_si_prefix + +logger = logging.getLogger("autotvm") + + +def log_to_file(file_out, protocol="json"): + """Log the tuning records into file. + The rows of the log are stored in the format of autotvm.record.encode. + + Parameters + ---------- + file_out : File or str + The file to log to. + protocol: str, optional + The log protocol. Can be 'json' or 'pickle' + + Returns + ------- + callback : callable + Callback function to do the logging. + """ + + def _callback(_, inputs, results): + """Callback implementation""" + if isinstance(file_out, str): + with open(file_out, "a") as f: + for inp, result in zip(inputs, results): + f.write(record.encode(inp, result, protocol) + "\n") + else: + for inp, result in zip(inputs, results): + file_out.write(record.encode(inp, result, protocol) + "\n") + + # pylint: disable=import-outside-toplevel + from pathlib import Path + + if isinstance(file_out, Path): + file_out = str(file_out) + + return _callback + + +def log_to_database(db): + """Save the tuning records to a database object. 
+ + Parameters + ---------- + db: Database + The database + """ + + def _callback(_, inputs, results): + """Callback implementation""" + for inp, result in zip(inputs, results): + db.save(inp, result) + + return _callback + + +class Monitor(object): + """A monitor to collect statistic during tuning""" + + def __init__(self): + self.scores = [] + self.timestamps = [] + + def __call__(self, tuner, inputs, results): + for inp, res in zip(inputs, results): + if res.error_no == 0: + flops = inp.task.flop / np.mean(res.costs) + self.scores.append(flops) + else: + self.scores.append(0) + + self.timestamps.append(res.timestamp) + + def reset(self): + self.scores = [] + self.timestamps = [] + + def trial_scores(self): + """get scores (currently is flops) of all trials""" + return np.array(self.scores) + + def trial_timestamps(self): + """get wall clock time stamp of all trials""" + return np.array(self.timestamps) + + +def progress_bar(total, prefix="", si_prefix="G"): + """Display progress bar for tuning + + Parameters + ---------- + total: int + The total number of trials + prefix: str + The prefix of output message + si_prefix: str + SI prefix for flops + """ + + class _Context(object): + """Context to store local variables""" + + def __init__(self): + self.best_flops = 0 + self.cur_flops = 0 + self.ct = 0 + self.total = total + + def __del__(self): + if logger.level < logging.DEBUG: # only print progress bar in non-debug mode + sys.stdout.write(" Done.\n") + + ctx = _Context() + tic = time.time() + + # Validate si_prefix argument + format_si_prefix(0, si_prefix) + + if logger.level < logging.DEBUG: # only print progress bar in non-debug mode + sys.stdout.write( + "\r%s Current/Best: %7.2f/%7.2f %sFLOPS | Progress: (%d/%d) " + "| %.2f s" % (prefix, 0, 0, si_prefix, 0, total, time.time() - tic) + ) + sys.stdout.flush() + + def _callback(tuner, inputs, results): + ctx.ct += len(inputs) + + flops = 0 + for inp, res in zip(inputs, results): + if res.error_no == 0: + flops = 
inp.task.flop / np.mean(res.costs) + + if not logger.isEnabledFor(logging.DEBUG): # only print progress bar in non-debug mode + ctx.cur_flops = flops + ctx.best_flops = tuner.best_flops + + sys.stdout.write( + "\r%s Current/Best: %7.2f/%7.2f %sFLOPS | Progress: (%d/%d) " + "| %.2f s" + % ( + prefix, + format_si_prefix(ctx.cur_flops, si_prefix), + format_si_prefix(ctx.best_flops, si_prefix), + si_prefix, + ctx.ct, + ctx.total, + time.time() - tic, + ) + ) + sys.stdout.flush() + + return _callback diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/droplet_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/droplet_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..d115353d773e1f1af746509fbdbd2cd4a6c3e752 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/droplet_tuner.py @@ -0,0 +1,142 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Tuner with droplet algorithm""" + +import logging +import os + +import numpy as np + +from .tuner import Tuner + +LOGGER = logging.getLogger("autotvm") + + +class DropletTuner(Tuner): + """Tuner with droplet algorithm. 
+ + Parameters + ---------- + start_position: list of int + position initial of the space, the default is [0, 0, ..., 0] + pvalue: float + statistical value to confidence level, the default is 0.05 + """ + + def __init__(self, task, start_position=None, pvalue=0.05): + super(DropletTuner, self).__init__(task) + + # space info + self.space = task.config_space + self.dims = [] + + for _, v in self.space.space_map.items(): + self.dims.append(len(v)) + if len(self.dims) == 0: + self.dims.append(1) + + # start position + start_position = [0] * len(self.dims) if start_position is None else start_position + self.best_choice = (-1, [0] * len(self.dims), [99999]) + self.visited = set([self.space.knob2point(start_position)]) + self.execution, self.total_execution, self.pvalue = 1, max(self.dims), pvalue + self.step, self.iter, self.batch = 1, 0, max(16, os.cpu_count()) + self.next = [(self.space.knob2point(start_position), start_position)] + + def num_to_bin(self, value, factor=1): + bin_format = str(0) * (len(self.dims) - len(bin(value)[2:])) + bin(value)[2:] + return [int(i) * factor for i in bin_format] + + def search_space(self, factor=1): + search_space = [] + for i in range(2 ** len(self.dims) - 1, 0, -1): + search_space += [self.num_to_bin(i, factor)] + [self.num_to_bin(i, -factor)] + return search_space + + def next_pos(self, new_positions): + "returns the neighbors of the best solution" + next_set = [] + for p in new_positions: + if len(next_set) > self.batch: + break + new_p = [ + (x + y) % self.dims[i] if (x + y > 0) else 0 + for i, (x, y) in enumerate(zip(p, self.best_choice[1])) + ] + idx_p = self.space.knob2point(new_p) + if idx_p not in self.visited: + self.visited.add(idx_p) + next_set.append((idx_p, new_p)) + return next_set + + def p_value(self, elem_1, elem_2): + if len(elem_1) <= 1 or len(elem_2) <= 1: + return True + + from scipy import stats # pylint: disable=import-outside-toplevel + + return stats.ttest_ind(np.array(elem_1), np.array(elem_2)).pvalue 
<= self.pvalue + + def next_batch(self, batch_size): + ret, self.batch = [], batch_size + for i in range(batch_size): + if i >= len(self.next): + break + if self.space.is_index_valid(self.next[i][0]): + ret.append(self.space.get(self.next[i][0])) + return ret + + def speculation(self): + # Gradient descending direction prediction and search space filling + while len(self.next) < self.batch and self.execution < self.total_execution: + self.execution += self.step + self.next += self.next_pos(self.search_space(self.execution)) + + def update(self, inputs, results): + found_best_pos, count_valids = False, 0 + for i, (_, res) in enumerate(zip(inputs, results)): + try: + if np.mean(self.best_choice[2]) > np.mean(res.costs) and self.p_value( + self.best_choice[2], res.costs + ): + self.best_choice = (self.next[i][0], self.next[i][1], res.costs) + found_best_pos = True + count_valids += 1 + except TypeError: + LOGGER.debug("Solution is not valid") + continue + else: + continue + + self.next = self.next[self.batch : -1] + if found_best_pos: + self.next += self.next_pos(self.search_space()) + self.execution = 1 + self.speculation() + # stop, because all neighborhoods are invalid. 
+ if count_valids == 0 and self.iter > 3: + self.next = [] + LOGGER.warning( + f"Warning: early termination due to an all-invalid neighborhood \ + after {self.iter} iterations" + ) + + def has_next(self): + return len(self.next) > 0 + + def load_history(self, data_set, min_seed_records=500): + pass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/ga_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/ga_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5b87ac5d70b57aea2a16d09180c3204eb66e3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/ga_tuner.py @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=consider-using-enumerate,invalid-name,abstract-method + +"""Tuner with genetic algorithm""" + +import numpy as np + +from .tuner import Tuner + + +class GATuner(Tuner): + """Tuner with genetic algorithm. + This tuner does not have a cost model so it always run measurement on real machines. + This tuner expands the :code:`ConfigEntity` as gene. 
+ + Parameters + ---------- + pop_size: int + number of genes in one generation + elite_num: int + number of elite to keep + mutation_prob: float + probability of mutation of a knob in a gene + """ + + def __init__(self, task, pop_size=100, elite_num=3, mutation_prob=0.1): + super(GATuner, self).__init__(task) + + # algorithm configurations + self.pop_size = pop_size + self.elite_num = elite_num + self.mutation_prob = mutation_prob + + assert elite_num <= pop_size, "The number of elites must be less than population size" + + # random initialization + self.pop_size = min(self.pop_size, len(self.space)) + self.elite_num = min(self.pop_size, self.elite_num) + self.visited = set(self.space.sample_ints(self.pop_size)) + + # current generation + self.genes = [self.space.point2knob(idx) for idx in self.visited] + self.scores = [] + self.elites = [] + self.elite_scores = [] + self.trial_pt = 0 + + def next_batch(self, batch_size): + ret = [] + while len(ret) < batch_size and self.has_next(): + gene = self.genes[self.trial_pt % self.pop_size] + self.trial_pt += 1 + ret.append(self.space.get(self.space.knob2point(gene))) + return ret + + def update(self, inputs, results): + for inp, res in zip(inputs, results): + if res.error_no == 0: + y = inp.task.flop / np.mean(res.costs) + self.scores.append(y) + else: + self.scores.append(0.0) + + if len(self.scores) >= len(self.genes) and len(self.visited) < len(self.space): + next_genes = [] + # There is no reason to crossover or mutate since the size of the unvisited + # is no larger than the size of the population. 
+ if len(self.space) - len(self.visited) <= self.pop_size: + for idx in range(self.space.range_length): + if self.space.is_index_valid(idx) and idx not in self.visited: + next_genes.append(self.space.point2knob(idx)) + self.visited.add(idx) + else: + genes = self.genes + self.elites + scores = np.array(self.scores[: len(self.genes)] + self.elite_scores) + + # reserve elite + self.elites, self.elite_scores = [], [] + elite_indexes = np.argpartition(scores, -self.elite_num)[-self.elite_num :] + for ind in elite_indexes: + self.elites.append(genes[ind]) + self.elite_scores.append(scores[ind]) + + indices = np.arange(len(genes)) + scores += 1e-8 + scores /= np.max(scores) + probs = scores / np.sum(scores) + while len(next_genes) < self.pop_size: + # cross over + p1, p2 = np.random.choice(indices, size=2, replace=False, p=probs) + p1, p2 = genes[p1], genes[p2] + point = np.random.randint(len(self.space.dims)) + tmp_gene = p1[:point] + p2[point:] + # mutation + for j, dim in enumerate(self.space.dims): + if np.random.random() < self.mutation_prob: + tmp_gene[j] = np.random.randint(dim) + + if self.space.is_index_valid(self.space.knob2point(tmp_gene)): + next_genes.append(tmp_gene) + self.visited.add(self.space.knob2point(tmp_gene)) + self.genes = next_genes + self.trial_pt = 0 + self.scores = [] + + def has_next(self): + return len(self.visited) - (len(self.genes) - self.trial_pt) < len(self.space) + + def load_history(self, data_set, min_seed_records=500): + pass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/index_based_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/index_based_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..881728bc9b34b6fb316bea0ce24996f7c43a634d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/index_based_tuner.py @@ -0,0 +1,101 @@ +# Licensed to the Apache Software Foundation 
(ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=abstract-method +"""Grid search tuner and random tuner""" + +from .tuner import Tuner + + +class IndexBaseTuner(Tuner): + """Base class for index based tuner + This type of tuner determine the next batch of configs based on config indices. 
+ + Parameters + ---------- + task: autotvm.task.Task + The tuning task + + range_idx: Optional[Tuple[int, int]] + A tuple of index range that this tuner can select from [begin_idx, end_idx] + """ + + def __init__(self, task, range_idx=None): + super(IndexBaseTuner, self).__init__(task) + assert range_idx is None or isinstance( + range_idx, tuple + ), "range_idx must be None or (int, int)" + + self.visited = [] + self.begin_idx, self.end_idx = range_idx or (0, self.space.range_length - 1) + assert self.begin_idx >= 0, "Start index must be positive" + self.end_idx += 1 # Further end_idx is exclusive + assert ( + self.end_idx <= self.space.range_length + ), "Finish index must be less the space range length " + self.range_length = self.end_idx - self.begin_idx + assert self.range_length > 0, "Index range must be positive" + self.visited_max = self.space.subrange_length(self.begin_idx, self.end_idx) + + def has_next(self): + return len(self.visited) < self.visited_max + + def load_history(self, data_set, min_seed_records=500): + pass + + +class GridSearchTuner(IndexBaseTuner): + """Enumerate the search space in a grid search order""" + + def __init__(self, task, range_idx=None): + super(GridSearchTuner, self).__init__(task, range_idx) + + self.index = self.begin_idx + if not self.space.is_index_valid(self.index): + self.index = self.space.get_next_index( + self.index, start=self.begin_idx, end=self.end_idx + ) + + def next_batch(self, batch_size): + ret = [] + while len(ret) < batch_size and self.has_next(): + self.visited.append(self.index) + ret.append(self.space.get(self.index)) + self.index = self.space.get_next_index( + self.index, start=self.begin_idx, end=self.end_idx + ) + return ret + + +class RandomTuner(IndexBaseTuner): + """Enumerate the search space in a random order + + Parameters + ---------- + task: autotvm.task.Task + Tuning Task + + range_idx: Optional[Tuple[int, int]] + A tuple of index range to random + """ + + def next_batch(self, batch_size): + 
ret = [] + while len(ret) < batch_size and self.has_next(): + index = self.space.get_rand_index(self.begin_idx, self.end_idx, to_exclude=self.visited) + self.visited.append(index) + ret.append(self.space.get(index)) + return ret diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/metric.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..f6932f80d3e3c614b29091c19082343d3267f56b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/metric.py @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name +"""Metrics for evaluating tuning process""" + +import numpy as np + +from ..utils import get_rank + + +def max_curve(trial_scores): + """f(n) = max([s[i] fo i < n]) + + Parameters + ---------- + trial_scores: Array of float + the score of i th trial + + Returns + ------- + curve: Array of float + function values + """ + ret = np.empty(len(trial_scores)) + keep = -1e9 + for i, score in enumerate(trial_scores): + keep = max(keep, score) + ret[i] = keep + return ret + + +def mean_curve(trial_scores): + """f(n) = mean([s[i] fo i < n]) + + Parameters + ---------- + trial_scores: Array of float + the score of i th trial + + Returns + ------- + curve: Array of float + function values + """ + ret = np.empty(len(trial_scores)) + keep = 0 + for i, score in enumerate(trial_scores): + keep += score + ret[i] = keep / (i + 1) + return ret + + +def recall_curve(trial_ranks, top=None): + """ + if top is None, f(n) = sum([I(rank[i] < n) for i < n]) / n + if top is K, f(n) = sum([I(rank[i] < K) for i < n]) / K + + Parameters + ---------- + trial_ranks: Array of int + the rank of i th trial in labels + top: int or None + top-n recall + + Returns + ------- + curve: Array of float + function values + """ + if not isinstance(trial_ranks, np.ndarray): + trial_ranks = np.array(trial_ranks) + + ret = np.zeros(len(trial_ranks)) + if top is None: + for i in range(len(trial_ranks)): + ret[i] = np.sum(trial_ranks[:i] <= i) / (i + 1) + else: + for i in range(len(trial_ranks)): + ret[i] = 1.0 * np.sum(trial_ranks[:i] < top) / top + return ret + + +def cover_curve(trial_ranks): + """ + f(n) = max k s.t. 
{1,2,...,k} is a subset of {ranks[i] for i < n} + + Parameters + ---------- + trial_ranks: Array of int + the rank of i th trial in labels + + Returns + ------- + curve: Array of float + function values + """ + ret = np.empty(len(trial_ranks)) + keep = -1 + cover = set() + for i, rank in enumerate(trial_ranks): + cover.add(rank) + while keep + 1 in cover: + keep += 1 + ret[i] = keep + 1 + return ret / len(trial_ranks) + + +def average_recall(preds, labels, N): + """evaluate average recall-n for predictions and labels""" + trials = np.argsort(preds)[::-1] + ranks = get_rank(labels[trials]) + curve = recall_curve(ranks) + return np.sum(curve[:N]) / N diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/model_based_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/model_based_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..0841e9a7652848dc0c506ca35f6f72d7c1c630e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/model_based_tuner.py @@ -0,0 +1,356 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=no-else-return,invalid-name,consider-using-enumerate,abstract-method +"""Base class for model-based tuner +This type of tuner will fit a cost model and use some optimization methods to +find optimums points of cost model in space. +""" +import gc + +import numpy as np + +from .tuner import Tuner +from ..env import GLOBAL_SCOPE + + +class FeatureCache(object): + """Feature cache manager for cache sharing between different cost models""" + + def __init__(self): + self.feature_cache = {} + + def get(self, key): + """Get feature cache dictionary for a key + + Parameters + ---------- + key: str + The key of a feature type + + Returns + ------- + fea_cache: dict + cache dictionary + """ + if key not in self.feature_cache: + self.feature_cache[key] = {} + + return self.feature_cache[key] + + def size(self, key): + """ " Get the size of a feature cache dictionary + + Parameters + ---------- + key: str + The key of a feature type + + Returns + ------- + n: int + """ + return len(self.feature_cache.get(key, tuple())) + + def clear(self, key): + """Clear feature cache for a key + + Parameters + ---------- + key: str + The key of a feature type + """ + del self.feature_cache[key] + self.feature_cache[key] = {} + gc.collect() + + +class CostModel(object): + """Cost model to predict the speed of a config""" + + def __init__(self): + pass + + def fit(self, xs, ys, plan_size): + """Fit to training data + + Parameters + ---------- + xs: Array of int + indexes of configs in the config space + ys: Array of float + The speed (flop, float number operations per second) + plan_size: int + The plan size of tuner + """ + raise NotImplementedError() + + def fit_log(self, records, plan_size, min_seed_records=500): + """Fit training data from log. + + Parameters + ---------- + records: Array of Tuple(MeasureInput, MeasureResult) + The tuning records + plan_size: int + The plan size of tuner + min_seed_records: int + Defaults to 500. 
Indicates the minimum number of records to + train the tuner with. If there are less than `min_seed_records` + number of records in `data_set`, no training of the tuner + will be done. + """ + raise NotImplementedError() + + def predict(self, xs, output_margin=False): + """Predict the speed of configs + + Parameters + ---------- + xs: Array of int + The indexes of configs to predict + output_margin: bool, optional + Whether output the untransformed margin. + When a model is used as base model, it should output untransformed margin + + Returns + ------- + preds: Array of float + The prediction + """ + raise NotImplementedError() + + def load_basemodel(self, base_model): + """Load base model for transfer learning + + Parameters + ---------- + base_model: CostModel + base model + """ + raise NotImplementedError() + + def spawn_base_model(self): + """Clone a base model with the same parameters. + The base model is used to fit history data in transfer learning. + + Returns + ------- + model: CostModel + A model with the same hyperparameter (argument) + """ + raise NotImplementedError() + + +class ModelOptimizer(object): + """Optimizer used to find optimal points of cost model""" + + def __init__(self): + pass + + def find_maximums(self, model, num, exclusive): + """Find maximum of a cost model + + Note we use cost model to predict GFLOPS, so we should find the maximum + + Parameters + ---------- + model: CostModel + Cost model + num: int + The number of returned maximum points + exclusive: set, optional + The excluded set of this optimizer. Return results won't include any + elements in this set. 
+ """ + raise NotImplementedError() + + +class ModelBasedTuner(Tuner): + """Base class for model based tuner + This type of tuner will fit a cost model and use an optimizer to + find the maximums of the cost model as next trials + + Parameters + ---------- + task: autotvm.task.Task + The tuning task + cost_model: CostModel + The cost model that predicts the speed of a config (IR) + model_optimizer: + The optimizer to find local optimum points of cost model in tuning search space + plan_size: int + Tuner will re-fit model per `plan_size` new measure samples + diversity_filter_ratio: int or float, optional + If is not None, the tuner will first select + top-(plan_size * diversity_filter_ratio) candidates according to the cost model + and then pick plan_size of them according to the diversity metric. + """ + + def __init__(self, task, cost_model, model_optimizer, plan_size, diversity_filter_ratio=None): + super(ModelBasedTuner, self).__init__(task) + + # space + self.task = task + self.target = task.target + self.plan_size = plan_size + + self.cost_model = cost_model + self.model_optimizer = model_optimizer + self.diversity_filter_ratio = diversity_filter_ratio + + if self.diversity_filter_ratio: + assert self.diversity_filter_ratio >= 1, ( + "Diversity filter ratio " "must be larger than one" + ) + + # trial plan + self.trials = [] + self.trial_pt = 0 + self.visited = set() + + # observed samples + self.xs = [] + self.ys = [] + self.flops_max = 0.0 + self.train_ct = 0 + + def next_batch(self, batch_size): + ret = [] + while len(ret) < batch_size and self.has_next(): + while self.trial_pt < len(self.trials): + index = self.trials[self.trial_pt] + if index not in self.visited and self.space.is_index_valid(index): + break + self.trial_pt += 1 + + if self.trial_pt >= len(self.trials) - int(0.05 * self.plan_size): + # if the trial list is empty or + # the tuner is doing the last 5% trials (e-greedy), choose randomly + index = 
self.space.get_rand_index(to_exclude=self.visited) + ret.append(self.space.get(index)) + self.visited.add(index) + return ret + + def update(self, inputs, results): + for inp, res in zip(inputs, results): + index = inp.config.index + if res.error_no == 0: + self.xs.append(index) + flops = inp.task.flop / np.mean(res.costs) + self.flops_max = max(self.flops_max, flops) + self.ys.append(flops) + else: + self.xs.append(index) + self.ys.append(0.0) + # Usually the update function is called during the tune loop + # after the index is already added to the visited set. + # However, adding the index to visited again here enables us + # to also use this update function to resume tuning progress in + # case of interruption. + assert self.space.is_index_valid(index) + self.visited.add(index) + # if we have enough new training samples + if len(self.xs) >= self.plan_size * (self.train_ct + 1) and self.flops_max > 1e-6: + self.cost_model.fit(self.xs, self.ys, self.plan_size) + if self.diversity_filter_ratio: + candidate = self.model_optimizer.find_maximums( + self.cost_model, self.plan_size * self.diversity_filter_ratio, self.visited + ) + scores = self.cost_model.predict(candidate) + knobs = [self.space.point2knob(x) for x in candidate] + pick_index = submodular_pick(0 * scores, knobs, self.plan_size, knob_weight=1) + maximums = np.array(candidate)[pick_index] + else: + maximums = self.model_optimizer.find_maximums( + self.cost_model, self.plan_size, self.visited + ) + + self.trials = maximums + self.trial_pt = 0 + self.train_ct += 1 + + def load_history(self, data_set, min_seed_records=500): + # set in_tuning as True to make the feature extraction consistent + GLOBAL_SCOPE.in_tuning = True + + # fit base model + base_model = self.cost_model.spawn_base_model() + success = base_model.fit_log(data_set, self.plan_size, min_seed_records) + + if not success: + GLOBAL_SCOPE.in_tuning = False + return + + # use base model to select initial points + if not self.trials: + # no plan yet, 
use base model to select initial trials + maximums = self.model_optimizer.find_maximums(base_model, self.plan_size, self.visited) + self.trials = maximums + self.trial_pt = 0 + + self.cost_model.load_basemodel(base_model) + GLOBAL_SCOPE.in_tuning = False + + def has_next(self): + return len(self.visited) < len(self.space) + + +def submodular_pick(scores, knobs, n_pick, knob_weight=1.0): + """Run greedy optimization to pick points with regard to both score and diversity. + DiversityScore = knob_weight * number of unique knobs in the selected set + Obj = sum(scores[i] for i in pick) + DiversityScore + Note that this objective function is a monotone submodular function. + + Parameters + ---------- + scores: Array of float + score of every points + knobs: Array of Array of int + feature vector (tunable knobs) of every points + n_pick: int + number of points to pick + knob_weight: float + weight of an unique knob feature + """ + n = len(scores) + assert n == len(knobs) + n_knobs = len(knobs[0]) + + knobs_set = [set() for _ in range(n_knobs)] + + ret = [] + remain = list(range(len(scores))) + + for _ in range(n_pick): + max_x = -1 + max_delta = -1e9 + + for x in remain: + tmp_delta = scores[x] + for i in range(n_knobs): + if knobs[x][i] not in knobs_set[i]: + tmp_delta += knob_weight + + if tmp_delta > max_delta: + max_delta, max_x = tmp_delta, x + + ret.append(max_x) + remain.remove(max_x) + for i in range(n_knobs): + knobs_set[i].add(knobs[max_x][i]) + + return ret diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/sa_model_optimizer.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/sa_model_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..518fc0e45eb29f97e6abe020bbce2a5ddd5ecb98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/sa_model_optimizer.py @@ -0,0 +1,155 @@ +# Licensed to the Apache Software 
Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=consider-using-enumerate, invalid-name, invalid-sequence-index +""" +Cost model optimizer based on simulated annealing +""" + +import heapq +import logging +import time + +import numpy as np + +from .model_based_tuner import ModelOptimizer + +logger = logging.getLogger("autotvm") + + +class SimulatedAnnealingOptimizer(ModelOptimizer): + """parallel simulated annealing optimization algorithm + + Parameters + ---------- + task: Task + The tuning task + n_iter: int + The number of iterations of simulated annealing + temp: float or Array of float + If is a single float, then use a constant temperature. 
+ If is an Array, then perform linear cooling from temp[0] to temp[1] + early_stop: int, optional + Stop iteration if the optimal set do not change in `early_stop` rounds + log_interval: int, optional + Print log every `log_interval` iterations + """ + + def __init__( + self, + task, + n_iter=500, + temp=(1, 0), + persistent=True, + parallel_size=128, + early_stop=50, + log_interval=50, + ): + super(SimulatedAnnealingOptimizer, self).__init__() + self.task = task + self.n_iter = n_iter + self.temp = temp + self.persistent = persistent + self.parallel_size = min(parallel_size, len(self.task.config_space)) + self.early_stop = early_stop or 1e9 + self.log_interval = log_interval + self.points = None + + def find_maximums(self, model, num, exclusive): + tic = time.time() + temp, n_iter, early_stop, log_interval = ( + self.temp, + self.n_iter, + self.early_stop, + self.log_interval, + ) + + if self.persistent and self.points is not None: + points = self.points + else: + points = self.task.config_space.sample_ints(self.parallel_size) + + scores = model.predict(points) + + # build heap and insert initial points + heap_items = [(float("-inf"), -1 - i) for i in range(num)] + heapq.heapify(heap_items) + in_heap = set(exclusive) + in_heap.update([x[1] for x in heap_items]) + + for s, p in zip(scores, points): + if s > heap_items[0][0] and p not in in_heap: + pop = heapq.heapreplace(heap_items, (s, p)) + in_heap.remove(pop[1]) + in_heap.add(p) + + k = 0 + k_last_modify = 0 + + if isinstance(temp, (tuple, list, np.ndarray)): + t = temp[0] + cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1) + else: + t = temp + cool = 0 + + while k < n_iter and k < k_last_modify + early_stop: + new_points = np.empty_like(points) + for i, p in enumerate(points): + new_points[i] = self.task.config_space.random_walk(p) + + new_scores = model.predict(new_points) + + ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1)) + ac_index = np.random.random(len(ac_prob)) < ac_prob + + 
points[ac_index] = new_points[ac_index] + scores[ac_index] = new_scores[ac_index] + + for s, p in zip(new_scores, new_points): + if s > heap_items[0][0] and p not in in_heap: + pop = heapq.heapreplace(heap_items, (s, p)) + in_heap.remove(pop[1]) + in_heap.add(p) + k_last_modify = k + + k += 1 + t -= cool + + if log_interval and k % log_interval == 0: + t_str = f"{t:.2f}" + logger.debug( + "SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\ttemp: %s\t" + "elapsed: %.2f", + k, + k_last_modify, + heap_items[0][0], + np.max([v for v, _ in heap_items]), + t_str, + time.time() - tic, + ) + + heap_items.sort(key=lambda item: -item[0]) + heap_items = [x for x in heap_items if x[0] >= 0] + logger.debug( + "SA iter: %d\tlast_update: %d\telapsed: %.2f", k, k_last_modify, time.time() - tic + ) + logger.debug("SA Maximums: %s", heap_items) + + if self.persistent: + self.points = points + + return [x[1] for x in heap_items] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..a758a5d4cd9c76a78b6839cb935cfe2111866270 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/tuner.py @@ -0,0 +1,229 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-argument, no-self-use, invalid-name +"""Base class of tuner""" +import logging +import tempfile + +import numpy as np + +from ..measure import MeasureInput, create_measure_batch +from ..utils import format_si_prefix + +from ..env import GLOBAL_SCOPE + +logger = logging.getLogger("autotvm") + + +class Tuner(object): + """Base class for tuners + + Parameters + ---------- + task: autotvm.task.Task + Tuning Task + """ + + def __init__(self, task, **kwargs): + self.param = kwargs + self.recorder = None + + self.task = task + self.space = self.task.config_space + + # keep the current best + self.best_config = None + self.best_flops = 0 + self.best_measure_pair = None + self.best_iter = 0 + self.error_ct_threshold = 150 + + # time to leave + self.ttl = None + self.n_trial = None + self.early_stopping = None + + def has_next(self): + """Whether has next untried config in the space + + Returns + ------- + has_next: bool + """ + raise NotImplementedError() + + def next_batch(self, batch_size): + """get the next batch of configs to be measure on real hardware + + Parameters + ---------- + batch_size: int + The size of the batch + + Returns + ------- + a batch of configs + """ + raise NotImplementedError() + + def update(self, inputs, results): + """Update parameters of the tuner according to measurement results + + Parameters + ---------- + inputs: Array of autotvm.measure.MeasureInput + The input for measurement + results: Array of autotvm.measure.MeasureResult + result for measurement + """ + + def tune(self, n_trial, measure_option, 
early_stopping=None, callbacks=(), si_prefix="G"): + """Begin tuning + + Parameters + ---------- + n_trial: int + Maximum number of configs to try (measure on real hardware) + measure_option: dict + The options for how to measure generated code. + You should use the return value ot autotvm.measure_option for this argument. + early_stopping: int, optional + Early stop the tuning when not finding better configs in this number of trials + callbacks: List of callable + A list of callback functions. The signature of callback function is + (Tuner, List of MeasureInput, List of MeasureResult) + with no return value. These callback functions will be called on + every measurement pair. See autotvm/tuner/callback.py for some examples. + si_prefix: str + One of tvm.autotvm.utils.SI_PREFIXES. The SI prefix to use when reporting FLOPS. + """ + measure_batch = create_measure_batch(self.task, measure_option) + n_parallel = getattr(measure_batch, "n_parallel", 1) + early_stopping = early_stopping or 1e9 + self.n_trial = n_trial + self.early_stopping = early_stopping + + # Validate si_prefix arg + format_si_prefix(0, si_prefix) + + old_level = logger.level + + GLOBAL_SCOPE.in_tuning = True + i = error_ct = 0 + errors = [] + while i < n_trial: + if not self.has_next(): + break + + configs = self.next_batch(min(n_parallel, n_trial - i)) + + inputs = [MeasureInput(self.task.target, self.task, config) for config in configs] + results = measure_batch(inputs) + + # keep best config + for k, (inp, res) in enumerate(zip(inputs, results)): + config = inp.config + if res.error_no == 0: + flops = inp.task.flop / np.mean(res.costs) + error_ct = 0 + result_msg = res + else: + flops = 0 + error_ct += 1 + tb, error = res.costs + if isinstance(error, str): + errors.append(tb + "\n" + error) + else: + errors.append(tb + "\n" + str(error)) + result_msg = errors[-1] + + if flops > self.best_flops: + self.best_flops = flops + self.best_config = config + self.best_measure_pair = (inp, res) + 
self.best_iter = i + k + + logger.debug( + "No: %d\t%sFLOPS: %.2f/%.2f\tresult: %s\t%s", + i + k + 1, + si_prefix, + format_si_prefix(flops, si_prefix), + format_si_prefix(self.best_flops, si_prefix), + result_msg, + config, + ) + + i += len(results) + self.ttl = min(early_stopping + self.best_iter, n_trial) - i + + self.update(inputs, results) + for callback in callbacks: + callback(self, inputs, results) + + if i >= self.best_iter + early_stopping: + logger.debug("Early stopped. Best iter: %d.", self.best_iter) + break + + if error_ct > self.error_ct_threshold: + logging.basicConfig() + logger.warning("Too many errors happen in the tuning. Switching to debug mode.") + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(old_level) + + if error_ct == i: + _, f = tempfile.mkstemp(prefix="tvm_tuning_errors_", suffix=".log", text=True) + with open(f, "w") as file: + file.write("\n".join(errors)) + logging.warning( + "Could not find any valid schedule for task %s. " + "A file containing the errors has been written to %s.", + self.task, + f, + ) + GLOBAL_SCOPE.in_tuning = False + del measure_batch + + def reset(self): + """reset the status of tuner""" + self.best_config = None + self.best_flops = 0 + self.best_measure_pair = None + + def load_history(self, data_set, min_seed_records=500): + """load history data for transfer learning + + Parameters + ---------- + data_set: Array of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult) pair + Previous tuning records + min_seed_records: int + Defaults to 500. Indicates the minimum number of records to + train the tuner with. If there are less than `min_seed_records` + number of records in `data_set`, no training of the tuner + will be done. 
+ """ + raise NotImplementedError() + + def set_error_threshold(self, threshold): + """Modify error counter threshold, which controls switch to debug mode + + Parameters + ---------- + threshold: New threshold value + """ + self.error_ct_threshold = threshold diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/xgboost_cost_model.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/xgboost_cost_model.py new file mode 100644 index 0000000000000000000000000000000000000000..048eecf10e253f6b499701b448be5347dd312537 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/xgboost_cost_model.py @@ -0,0 +1,706 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""XGBoost as cost model""" + +import logging +import time + +from typing import Dict + +import numpy as np +from tvm.contrib.popen_pool import PopenPoolExecutor, StatusKind + +from .. 
import feature +from ..utils import get_rank +from .metric import cover_curve, max_curve, recall_curve +from .model_based_tuner import CostModel, FeatureCache + +try: + from xgboost.callback import TrainingCallback # type: ignore +except ImportError: + + class TrainingCallback: # type: ignore + pass + + +xgb = None + +logger = logging.getLogger("autotvm") + + +class XGBoostCostModel(CostModel): + """XGBoost as cost model + + Parameters + ---------- + task: Task + The tuning task + feature_type: str, optional + If is 'itervar', use features extracted from IterVar (loop variable). + If is 'knob', use flatten ConfigEntity directly. + If is 'curve', use sampled curve feature (relation feature). + + Note on choosing feature type: + For single task tuning, 'itervar' and 'knob' are good. + 'itervar' is more accurate but 'knob' is much faster. + There are some constraints on 'itervar', if you meet + problems with feature extraction when using 'itervar', + you can switch to 'knob'. + + For cross-shape tuning (e.g. many convolutions with different shapes), + 'itervar' and 'curve' has better transferability, + 'knob' is faster. + For cross-device or cross-operator tuning, you can use 'curve' only. + loss_type: str + If is 'reg', use regression loss to train cost model. + The cost model predicts the normalized flops. + If is 'rank', use pairwise rank loss to train cost model. + The cost model predicts relative rank score. + If is 'rank-binary', use pairwise rank loss with binarized labels to train cost model. + The cost model predicts relative rank score. + num_threads: int, optional + The number of threads. + log_interval: int, optional + If is not none, the cost model will print training log every `log_interval` iterations. 
+ upper_model: XGBoostCostModel, optional + The upper model used in transfer learning + """ + + def __init__( + self, + task, + feature_type, + loss_type="reg", + num_threads=None, + log_interval=25, + upper_model=None, + ): + global xgb + super(XGBoostCostModel, self).__init__() + try: + if xgb is None: + xgb = __import__("xgboost") + except ImportError: + raise ImportError( + "XGBoost is required for XGBoostCostModel. " + "Please install its python package first. " + "Help: (https://xgboost.readthedocs.io/en/latest/) " + ) + + self.task = task + self.target = task.target + self.space = task.config_space + + self.fea_type = feature_type + self.loss_type = loss_type + self.num_threads = num_threads + self.log_interval = log_interval + + self.loss_type = loss_type + + if loss_type == "reg": + self.xgb_params = { + "max_depth": 3, + "gamma": 0.0001, + "min_child_weight": 1, + "subsample": 1.0, + "eta": 0.3, + "lambda": 1.00, + "alpha": 0, + "objective": "reg:linear", + } + elif loss_type in ("rank", "rank-binary"): + self.xgb_params = { + "max_depth": 3, + "gamma": 0.0001, + "min_child_weight": 1, + "subsample": 1.0, + "eta": 0.3, + "lambda": 1.00, + "alpha": 0, + "objective": "rank:pairwise", + } + else: + raise RuntimeError("Invalid loss type: " + loss_type) + + self.xgb_params["verbosity"] = 0 + if num_threads: + self.xgb_params["nthread"] = num_threads + self.bst = None + + if feature_type == "itervar": + self.feature_extract_func = _extract_itervar_feature_index + elif feature_type == "knob": + self.feature_extract_func = _extract_knob_feature_index + elif feature_type == "curve": + self.feature_extract_func = _extract_curve_feature_index + else: + raise RuntimeError("Invalid feature type " + feature_type) + + if upper_model: # share a same feature cache with upper model + self.feature_cache = upper_model.feature_cache + else: + self.feature_cache = FeatureCache() + self.upper_model = upper_model + self.feature_extra_ct = 0 + self.pool = None + self.base_model = 
None + + self._sample_size = 0 + self._reset_pool(self.space, self.target, self.task) + + def _reset_pool(self, space, target, task): + """reset processing pool for feature extraction""" + + if self.upper_model: # base model will reuse upper model's pool, + self.upper_model._reset_pool(space, target, task) + return + + self._close_pool() + + self.pool = PopenPoolExecutor( + max_workers=self.num_threads, + initializer=_extract_popen_initializer, + initargs=(space, target, task), + ) + + def _close_pool(self): + if self.pool: + self.pool = None + + def _get_pool(self): + if self.upper_model: + return self.upper_model._get_pool() + return self.pool + + def _base_model_discount(self): + return 1.0 / (2 ** (self._sample_size / 64.0)) + + def fit(self, xs, ys, plan_size): + tic = time.time() + self._reset_pool(self.space, self.target, self.task) + + x_train = self._get_feature(xs) + y_train = np.array(ys) + y_max = np.max(y_train) + y_train = y_train / max(y_max, 1e-8) + + valid_index = y_train > 1e-6 + index = np.random.permutation(len(x_train)) + dtrain = xgb.DMatrix(x_train[index], y_train[index]) + self._sample_size = len(x_train) + + if self.base_model: + discount = self._base_model_discount() + if discount < 0.05: # discard base model + self.base_model.upper_model = None + self.base_model = None + else: + dtrain.set_base_margin(discount * self.base_model.predict(xs, output_margin=True)) + + self.bst = xgb.train( + self.xgb_params, + dtrain, + num_boost_round=8000, + callbacks=[ + CustomCallback( + stopping_rounds=20, + metric=f"tr-a-recall@{plan_size}", + evals=[(dtrain, "tr")], + maximize=True, + fevals=[xgb_average_recalln_curve_score(plan_size)], + verbose_eval=self.log_interval, + loss_type=self.loss_type, + ) + ], + ) + + logger.debug( + "XGB train: %.2f\tobs: %d\terror: %d\tn_cache: %d", + time.time() - tic, + len(xs), + len(xs) - np.sum(valid_index), + self.feature_cache.size(self.fea_type), + ) + + def fit_log(self, records, plan_size, 
min_seed_records=500): + tic = time.time() + + # filter data, only pick the data with a same task + data = [] + for inp, res in records: + if inp.task.name == self.task.name: + data.append((inp, res)) + + logger.debug("XGB load %d entries from history log file", len(data)) + + # extract feature + self._reset_pool(self.space, self.target, self.task) + pool = self._get_pool() + if self.fea_type == "itervar": + feature_extract_func = _extract_itervar_feature_log + elif self.fea_type == "knob": + feature_extract_func = _extract_knob_feature_log + elif self.fea_type == "curve": + feature_extract_func = _extract_curve_feature_log + else: + raise RuntimeError("Invalid feature type: " + self.fea_type) + result = pool.map_with_error_catching(feature_extract_func, data) + result = list(result) # store results so we can iterate through them twice + + # get maximum feature length + fea_len = -1 + for res in result: + if res.status != StatusKind.COMPLETE: + continue + x, _ = res.value + fea_len = max(fea_len, x.shape[0]) + + xs, ys = [], [] + for res in result: + if res.status != StatusKind.COMPLETE: + continue + x, y = res.value + # Features may not be the same size, pad them until they are + if fea_len > len(x): + xs.append(np.pad(x, (0, fea_len - len(x)))) + else: + xs.append(x) + ys.append(y) + + if len(xs) < min_seed_records: # no enough samples + return False + + xs, ys = np.array(xs), np.array(ys) + x_train = xs + y_train = ys + y_max = np.max(y_train) + y_train = y_train / max(y_max, 1e-8) + + index = np.random.permutation(len(x_train)) + dtrain = xgb.DMatrix(x_train[index], y_train[index]) + + plan_size *= 2 + self.bst = xgb.train( + self.xgb_params, + dtrain, + num_boost_round=400, + callbacks=[ + CustomCallback( + stopping_rounds=100, + metric=f"tr-a-recall@{plan_size}", + evals=[(dtrain, "tr")], + maximize=True, + fevals=[xgb_average_recalln_curve_score(plan_size)], + verbose_eval=self.log_interval, + loss_type=self.loss_type, + ) + ], + ) + + logger.debug("XGB 
train: %.2f\tobs: %d", time.time() - tic, len(xs)) + + return True + + def predict(self, xs, output_margin=False): + feas = self._get_feature(xs) + dtest = xgb.DMatrix(feas) + + if self.base_model: + dtest.set_base_margin( + self._base_model_discount() * self.base_model.predict(xs, output_margin=True) + ) + + return self.bst.predict(dtest, output_margin=output_margin) + + def load_basemodel(self, base_model): + self.base_model = base_model + self.base_model._close_pool() + self.base_model.upper_model = self + + def spawn_base_model(self): + return XGBoostCostModel( + self.task, self.fea_type, self.loss_type, self.num_threads, self.log_interval, self + ) + + def _get_feature(self, indexes): + """get features for indexes, run extraction if we do not have cache for them""" + # free feature cache + if self.feature_cache.size(self.fea_type) >= 100000: + self.feature_cache.clear(self.fea_type) + + fea_cache = self.feature_cache.get(self.fea_type) + + indexes = np.array(indexes) + need_extract = [x for x in indexes if x not in fea_cache] + + if need_extract: + pool = self._get_pool() + feas = pool.map_with_error_catching(self.feature_extract_func, need_extract) + for i, fea in zip(need_extract, feas): + fea_cache[i] = fea.value if fea.status == StatusKind.COMPLETE else None + + feature_len = -1 + for idx in indexes: + if fea_cache[idx] is not None: + feature_len = max(fea_cache[idx].shape[-1], feature_len) + + ret = np.empty((len(indexes), feature_len), dtype=np.float32) + for i, ii in enumerate(indexes): + t = fea_cache[ii] + if t is not None and t.shape[0] < feature_len: + t = np.pad(t, (0, feature_len - t.shape[0])) + ret[i, :] = t if t is not None else 0 + return ret + + def __del__(self): + self._close_pool() + + +# Global variables for passing arguments to extract functions. 
+_extract_space = None +_extract_target = None +_extract_task = None + + +def _extract_popen_initializer(space, target, task): + global _extract_space, _extract_target, _extract_task + _extract_space = space + _extract_target = target + _extract_task = task + + +def _extract_itervar_feature_index(args): + """extract iteration var feature for an index in extract_space""" + config = _extract_space.get(args) + with _extract_target: + sch, fargs = _extract_task.instantiate(config) + + fea = feature.get_itervar_feature_flatten(sch, fargs, take_log=True) + fea = np.concatenate((fea, list(config.get_other_option().values()))) + return fea + + +def _extract_itervar_feature_log(arg): + """extract iteration var feature for log items""" + inp, res = arg + config = inp.config + with inp.target: + sch, args = inp.task.instantiate(config) + fea = feature.get_itervar_feature_flatten(sch, args, take_log=True) + x = np.concatenate((fea, list(config.get_other_option().values()))) + + if res.error_no == 0: + y = inp.task.flop / np.mean(res.costs) + else: + y = 0.0 + return x, y + + +def _extract_knob_feature_index(args): + """extract knob feature for an index in extract_space""" + config = _extract_space.get(args) + + return config.get_flatten_feature() + + +def _extract_knob_feature_log(arg): + """extract knob feature for log items""" + inp, res = arg + config = inp.config + x = config.get_flatten_feature() + + if res.error_no == 0: + with inp.target: # necessary, for calculating flops of this task + inp.task.instantiate(config) + y = inp.task.flop / np.mean(res.costs) + else: + y = 0.0 + return x, y + + +def _extract_curve_feature_index(args): + """extract sampled curve feature for an index in extract_space""" + config = _extract_space.get(args) + with _extract_target: + sch, fargs = _extract_task.instantiate(config) + + fea = feature.get_buffer_curve_sample_flatten(sch, fargs, sample_n=20) + fea = np.concatenate((fea, list(config.get_other_option().values()))) + return 
np.array(fea) + + +def _extract_curve_feature_log(arg): + """extract sampled curve feature for log items""" + inp, res = arg + config = inp.config + with inp.target: + sch, args = inp.task.instantiate(config) + fea = feature.get_buffer_curve_sample_flatten(sch, args, sample_n=20) + x = np.concatenate((fea, list(config.get_other_option().values()))) + + if res.error_no == 0: + y = inp.task.flop / np.mean(res.costs) + else: + y = 0.0 + return x, y + + +def _binarize_evals(evals): + """binarize evaluation labels""" + bin_evals = [] + for evalset in evals: + # binarize labels in xgb.dmatrix copy + barray = evalset[0].get_data().copy() + blabel = evalset[0].get_label().copy() + blabel[blabel < 0.5] = 0.0 + blabel[blabel >= 0.5] = 1.0 + # pylint: disable=R1721 + bin_evals.append(tuple([xgb.DMatrix(barray, blabel)] + [e for e in evalset[1:]])) + return bin_evals + + +class XGBoostCallback(TrainingCallback): + """Base class for XGBoost callbacks.""" + + def __call__(self, env: "xgb.core.CallbackEnv"): + # Compatibility with xgboost < 1.3 + return self.after_iteration(env.model, env.iteration, env.evaluation_result_list) + + def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict): + raise NotImplementedError + + +class CustomCallback(XGBoostCallback): + """ + Callback function for xgboost. + Support custom evaluation function and early-stopping. 
+ """ + + def __init__( + self, + stopping_rounds, + metric, + fevals, + loss_type="reg", + evals=(), + log_file=None, + maximize=False, + verbose_eval=True, + skip_every=2, + ): + """Init function""" + self.stopping_rounds = stopping_rounds + self.metric = metric + self.metric_shortname = metric.split("-")[1] + self.fevals = fevals + self.evals = evals + self.log_file = log_file + self.maximize = maximize + self.verbose_eval = verbose_eval + self.loss_type = loss_type + self.skip_every = skip_every + self.state = {} + + def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict): + """Run after each iteration. Return True when training should stop.""" + # pylint:disable = import-outside-toplevel + try: + from xgboost.callback import _fmt_metric # type: ignore + except ImportError: + # Compatibility with xgboost >= 1.6 + def _fmt_metric(value, show_stdv=True): + """format metric string""" + if len(value) == 2: + return f"{value[0]}:{value[1]:.5f}" + if len(value) == 3: + if show_stdv: + return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}" + return f"{value[0]}:{value[1]:.5f}" + raise ValueError("wrong metric value", value) + + ##### init state ##### + if not self.state: + self.state["maximize_score"] = self.maximize + self.state["best_iteration"] = 0 + if self.maximize: + self.state["best_score"] = float("-inf") + else: + self.state["best_score"] = float("inf") + + assert model is not None + if model.attr("best_score") is not None: + self.state["best_score"] = float(model.attr("best_score")) + self.state["best_iteration"] = int(model.attr("best_iteration")) + self.state["best_msg"] = model.attr("best_msg") + else: + model.set_attr(best_iteration=str(self.state["best_iteration"])) + model.set_attr(best_score=str(self.state["best_score"])) + res_dict = {} + + if epoch % self.skip_every == 1: + return False + + ##### evaluation ##### + mod_evals = self.evals + if self.loss_type == "rank-binary": + mod_evals = _binarize_evals(self.evals) + + if 
self.loss_type == "rank" and int(xgb.__version__[0]) >= 2: + # since xgboost pr#8931 + raise RuntimeError( + "Use 'rank-binary' instead of 'rank' loss_type with xgboost %s >= 2.0.0" + % xgb.__version__ + ) + + for feval in self.fevals: + bst_eval = model.eval_set(mod_evals, epoch, feval) + res = [x.split(":") for x in bst_eval.split()] + for kv in res[1:]: + res_dict[kv[0]] = [float(kv[1])] + + eval_res = [] + keys = list(res_dict.keys()) + keys.sort(key=lambda x: x if self.metric_shortname not in x else "a" + x) + for key in keys: + v = res_dict[key] + eval_res.append([key] + v) + + ##### print eval result ##### + if ( + not isinstance(self.verbose_eval, bool) + and self.verbose_eval + and epoch % self.verbose_eval == 0 + ): + infos = [f"XGB iter: {epoch:3d}"] + for item in eval_res: + if "null" in item[0]: + continue + infos.append(f"{item[0]}: {item[1]:.6f}") + + logger.debug("\t".join(infos)) + if self.log_file: + with open(self.log_file, "a") as fout: + fout.write("\t".join(infos) + "\n") + + ##### choose score and do early stopping ##### + score = None + for item in eval_res: + if item[0] == self.metric: + score = item[1] + break + assert score is not None + + best_score = self.state["best_score"] + best_iteration = self.state["best_iteration"] + maximize_score = self.state["maximize_score"] + + if (maximize_score and score > best_score) or (not maximize_score and score < best_score): + msg = f"[{epoch}] " + "\t".join([_fmt_metric(x) for x in eval_res]) + self.state["best_msg"] = msg + self.state["best_score"] = score + self.state["best_iteration"] = epoch + # save the property to attributes, so they will occur in checkpoint. + if model is not None: + model.set_attr( + best_score=str(self.state["best_score"]), + best_iteration=str(self.state["best_iteration"]), + best_msg=self.state["best_msg"], + ) + elif epoch - best_iteration >= self.stopping_rounds: + best_msg = self.state["best_msg"] + if self.verbose_eval: + logger.debug("XGB stopped. 
Best iteration: %s ", best_msg) + return True + + return False + + +# feval wrapper for xgboost +def xgb_max_curve_score(N): + """evaluate max curve score for xgb""" + + def feval(preds, labels): + labels = labels.get_label() + trials = np.argsort(preds)[::-1] + scores = labels[trials] + curve = max_curve(scores) + return f"Smax@{N}", curve[N] / np.max(labels) + + return feval + + +def xgb_recalln_curve_score(N): + """evaluate recall-n curve score for xgb""" + + def feval(preds, labels): + labels = labels.get_label() + trials = np.argsort(preds)[::-1] + ranks = get_rank(labels[trials]) + curve = recall_curve(ranks) + return f"recall@{N}", curve[N] + + return feval + + +def xgb_average_recalln_curve_score(N): + """evaluate average recall-n curve score for xgb""" + + def feval(preds, labels): + labels = labels.get_label() + trials = np.argsort(preds)[::-1] + ranks = get_rank(labels[trials]) + curve = recall_curve(ranks) + return f"a-recall@{N}", np.sum(curve[:N]) / N + + return feval + + +def xgb_recallk_curve_score(N, topk): + """evaluate recall-k curve score for xgb""" + + def feval(preds, labels): + labels = labels.get_label() + trials = np.argsort(preds)[::-1] + ranks = get_rank(labels[trials]) + curve = recall_curve(ranks, topk) + return f"recall@{topk}", curve[N] + + return feval + + +def xgb_cover_curve_score(N): + """evaluate cover curve score for xgb""" + + def feval(preds, labels): + labels = labels.get_label() + trials = np.argsort(preds)[::-1] + ranks = get_rank(labels[trials]) + curve = cover_curve(ranks) + return f"cover@{N}", curve[N] + + return feval + + +def xgb_null_score(_): + """empty score function for xgb""" + + def feval(__, ___): + return "null", 0 + + return feval diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/xgboost_tuner.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/xgboost_tuner.py new file mode 100644 index 
0000000000000000000000000000000000000000..0e77bf674bac621ed9b25156019a088de9814b44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/tuner/xgboost_tuner.py @@ -0,0 +1,110 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Tuner that uses xgboost as cost model""" + +from .model_based_tuner import ModelBasedTuner, ModelOptimizer +from .xgboost_cost_model import XGBoostCostModel +from .sa_model_optimizer import SimulatedAnnealingOptimizer + + +class XGBTuner(ModelBasedTuner): + """Tuner that uses xgboost as cost model + + Parameters + ---------- + task: Task + The tuning task + plan_size: int + The size of a plan. After `plan_size` trials, the tuner will refit a new cost model + and do planing for the next `plan_size` trials. + feature_type: str, optional + If is 'itervar', use features extracted from IterVar (loop variable). + If is 'knob', use flatten ConfigEntity directly. + If is 'curve', use sampled curve feature (relation feature). + + Note on choosing feature type: + For single task tuning, 'itervar' and 'knob' are good. + 'itervar' is more accurate but 'knob' is much faster. 
+ There are some constraints on 'itervar', if you meet + problems with feature extraction when using 'itervar', + you can switch to 'knob'. + + For cross-shape tuning (e.g. many convolutions with different shapes), + 'itervar' and 'curve' has better transferability, + 'knob' is faster. + + For cross-device or cross-operator tuning, you can use 'curve' only. + loss_type: str + If is 'reg', use regression loss to train cost model. + The cost model predicts the normalized flops. + If is 'rank', use pairwise rank loss to train cost model. + The cost model predicts relative rank score. + If is 'rank-binary', use pairwise rank loss with binarized labels to train cost model. + The cost model predicts relative rank score. + + num_threads: int, optional + The number of threads. + + optimizer: str or ModelOptimizer, optional + If is 'sa', use a default simulated annealing optimizer. + Otherwise it should be a ModelOptimizer object. + + diversity_filter_ratio: int or float, optional + If is not None, the tuner will first select + top-(plan_size * diversity_filter_ratio) candidates according to the cost model + and then pick batch_size of them according to the diversity metric. + + log_interval: int = 50 + The verbose level. + If is 0, output nothing. + Otherwise, output debug information every `verbose` iterations. + """ + + def __init__( + self, + task, + plan_size=64, + feature_type="itervar", + loss_type="reg", + num_threads=None, + optimizer="sa", + diversity_filter_ratio=None, + log_interval=50, + ): + cost_model = XGBoostCostModel( + task, + feature_type=feature_type, + loss_type=loss_type, + num_threads=num_threads, + log_interval=log_interval // 2, + ) + if optimizer == "sa": + optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval) + else: + assert isinstance(optimizer, ModelOptimizer), ( + "Optimizer must be " "a supported name string" "or a ModelOptimizer object." 
+ ) + + super(XGBTuner, self).__init__( + task, cost_model, optimizer, plan_size, diversity_filter_ratio + ) + + def tune(self, *args, **kwargs): # pylint: disable=arguments-differ + super(XGBTuner, self).tune(*args, **kwargs) + + # manually close pool to avoid multiprocessing issues + self.cost_model._close_pool() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/utils.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..75db5208adbe4959b932923634f55058d91eb3a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/autotvm/utils.py @@ -0,0 +1,169 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name +"""Utilities""" +import logging +import time + +import numpy as np +import tvm.arith +from tvm.tir import expr +from tvm.contrib.popen_pool import PopenPoolExecutor + +logger = logging.getLogger("autotvm") + + +class EmptyContext(object): + """An empty context""" + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + +def get_rank(values): + """get rank of items + + Parameters + ---------- + values: Array + + Returns + ------- + ranks: Array of int + the rank of this item in the input (the largest value ranks first) + """ + tmp = np.argsort(-values) + ranks = np.empty_like(tmp) + ranks[tmp] = np.arange(len(tmp)) + return ranks + + +def pool_map(func, args, batch_size, verbose=False, pool=None): + """A wrapper of multiprocessing.pool.Pool.map to support small-batch mapping + for large argument list. This can reduce memory usage + + Parameters + ---------- + func: Func(arg) -> np.ndarray + mapping function + args: List + list of arguments + batch_size: int + batch size in mapping + verbose: bool, optional + whether print progress + pool: multiprocessing.Pool, optional + pool objection + + Returns + ------- + converted numpy array + """ + + ret = None + tic = time.time() + local_pool = pool or PopenPoolExecutor() + if verbose: + logger.info("mapping begin") + for i in range(0, len(args), batch_size): + if verbose: + logger.info("mapping %d/%d elapsed %.2f", i, len(args), time.time() - tic) + tmp = np.array(local_pool.map(func, args[i : i + batch_size])) + ret = tmp if ret is None else np.concatenate((ret, tmp)) + if verbose: + logger.info("mapping done") + if not pool: + local_pool.close() + return ret + + +def get_func_name(func): + """Get name of a function + + Parameters + ---------- + func: Function + The function + Returns + ------- + name: str + The name + """ + + return func.func_name if hasattr(func, "func_name") else func.__name__ + + +def get_const_int(exp): + """Verifies expr is integer 
and get the constant value. + + Parameters + ---------- + exp : tvm.Expr or int + The input expression. + + Returns + ------- + out_value : int + The output. + """ + if isinstance(exp, int): + return exp + if not isinstance(exp, (expr.IntImm,)): + ana = tvm.arith.Analyzer() + exp = ana.simplify(exp) + if not isinstance(exp, (expr.IntImm,)): + raise ValueError("Expect value to be constant int") + return exp.value + + +def get_const_tuple(in_tuple): + """Verifies input tuple is IntImm or Var, returns tuple of int or Var. + + Parameters + ---------- + in_tuple : tuple of Expr + The input. + + Returns + ------- + out_tuple : tuple of int + The output. + """ + ret = [] + for elem in in_tuple: + if isinstance(elem, expr.Var): + ret.append(elem) + elif not isinstance(elem, (expr.IntImm, int)): + ana = tvm.arith.Analyzer() + elem = ana.simplify(elem) + if not isinstance(elem, (expr.IntImm)): + ret.append(elem) + else: + ret.append(get_const_int(elem)) + return tuple(ret) + + +SI_PREFIXES = "yzafpn\xb5m kMGTPEZY" +YOCTO_EXP10 = -24 + + +def format_si_prefix(x, si_prefix): + exp10 = 10 ** (SI_PREFIXES.index(si_prefix) * 3 + YOCTO_EXP10) + return float(x) / exp10 diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e887d65d8c7861fcf4a6905de58fdee708b996a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__init__.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Contrib APIs of TVM python package. + +Contrib API provides many useful not core features. +Some of these are useful utilities to interact with +thirdparty libraries and tools. +""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f4603b9f426e28fb5799b569697094c7a98c95a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/__init__.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2952eae9381e146e8f30c068df701049c8018c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/__init__.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cblas.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cblas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c9d3d95f0ea8ddc0b9ad8922e384effcaf3cd13 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cblas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cblas.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cblas.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cac7ed6c8607c4d513fff394715042a06f11e640 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cblas.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cc.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f712c98513740767f2c4b2a30d46e9e3c99cb797 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cc.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cc.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f518dd0b3b2d3c71a5a82ef5984185d92746d526 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cc.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/clang.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/clang.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f495c2ea20c880e8b40e222c783376de0c507d5 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/clang.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/coreml_runtime.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/coreml_runtime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58de4095c87c4b69c7b101c9b83af43ced41f93e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/coreml_runtime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/coreml_runtime.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/coreml_runtime.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fa404c29676100c225da26f19a55dc78ff609d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/coreml_runtime.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublas.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8964fee15652e299ca741511afb21beeb3d6d2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublas.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublas.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ccd5164d5c8b1d05bce1d678d34d6b28cd02123 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublas.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublaslt.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublaslt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6e36628ca6dbdd694f3143a0f388193c816b035 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cublaslt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cudnn.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cudnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f021c9a7fd57e31231dbac29ddccf5b58ff3d11 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cudnn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cudnn.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cudnn.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d96a1760a59f938ab0e5b7a3a2902a214ad598a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/cudnn.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dlpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dlpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580c6cee5f58549db3c4640154ff33175c8591ae Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dlpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dnnl.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dnnl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f36846ab3bf27f0f02fc24ec73a7019866c30c93 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dnnl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dnnl.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dnnl.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bacf255ba69494fe4947bf840b653a1bba8ea6a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/dnnl.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/download.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/download.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..307cc4f0e464ca0f011e5562ee638f9c2b041112 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/download.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/download.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/download.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b890ca8b659d2df702b423b93510d17e1b7f765 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/download.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/emcc.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/emcc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b760f3265461efba659098bad2fef373f99827d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/emcc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef509eaa1cf4c03f2815b4f2c4080ac88ed5245f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_executor.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_executor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d9527a46e784f7f3fade2a454488972506f2f6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_executor.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_runtime.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_runtime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2882188e51bdc62036abbf5a66d9961b857de89 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/graph_runtime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/hipcc.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/hipcc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..476d1c82e2e23af84d5c99dbc75450c0d7511089 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/hipcc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/hipcc.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/hipcc.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..242cf03bbf7201772d65431ea57b8343c8d1a893 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/hipcc.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/miopen.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/miopen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..303e289b99022e855447386ff98faa2cf2b29d0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/miopen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/miopen.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/miopen.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29b5fdfab6f3a8f2f68929cb53918960a711ac8d Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/miopen.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mkl.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mkl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e05578dcddba69baf754a9a0254e2e66b109e26a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mkl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mkl.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mkl.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85d3c2ccbac8907d62c44bcf96e1d20c6fbccfd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mkl.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mps.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9beb90d7be6a4fbaa8d5eeaeca526e20d3e11771 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mps.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mrvl.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mrvl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b942782e565464d11b18c368e22de58635129c0b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mrvl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mrvl.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mrvl.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f040f889f6bed1187a11bdf7b1159bd3cad7695f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mrvl.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mxnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mxnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1328f868408252a87f28f608f884166efbc2ed3c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/mxnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/ndk.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/ndk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e38c8db8a207f647876f2369619ed1acec97977 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/ndk.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/ndk.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/ndk.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252c613b8b93d85a5081e4143cc8ef1522042f9e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/ndk.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nnpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nnpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c12d2ddf501348644a1d59694a2017b62c6972b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nnpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nnpack.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nnpack.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a9e0ad08623a17fb252e3f67f36c3357bdad68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nnpack.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nvcc.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nvcc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c312269de033f9f0d3b57d816cb6e61018124d5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nvcc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nvcc.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nvcc.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c136314b4b7c2bc926cba411de2665db51e25ede Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/nvcc.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/peak.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/peak.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3e4f1547b3b507b5ff0ebc8928fd339187edc3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/peak.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pickle_memoize.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pickle_memoize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10a41ebe6a1eac6423bae17c33d48a9ab0a31a37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pickle_memoize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pickle_memoize.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pickle_memoize.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8bce6ec49fb689f5a3b12036c6c95031519004c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pickle_memoize.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pipeline_executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pipeline_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b44fb8ce7bbb84bd5efe50ed793cb63fbd302e3a Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pipeline_executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pipeline_executor_build.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pipeline_executor_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc3b922171b5a228f9e834fd28b8badfa660c746 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/pipeline_executor_build.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/popen_pool.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/popen_pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..052be625ac96d489f7783b22942613933836f310 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/popen_pool.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/popen_pool.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/popen_pool.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba99ca78220661c6425930cb492963621920b19c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/popen_pool.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/random.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..77a00ef6683ce0427e3b64fe52837cfcd09ddade Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocblas.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocblas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13265334661cc1874b37adcb933322a74f80d929 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocblas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocblas.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocblas.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..855df3d82f024b592de990b20258bd9537a16855 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocblas.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocm.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f641614096858a9c9b7f7c6c79ccbb37221438f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocm.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocm.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..af67b2bc3677168b92fa241af57c664099f077c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rocm.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rpc.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rpc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f5163a097034344ee69ce126bfc61ca771e44d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/rpc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sdaccel.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sdaccel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..651feeff63a9e1b727f0ffbd83598f0c5f4960a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sdaccel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sdaccel.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sdaccel.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e8102e842367d5e3b563357a58664a55562fe5d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sdaccel.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..34a09a011aac4a09fb07a651da47f88d57bdeec3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/sparse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/stackvm.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/stackvm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdf4fdeaf7b04268a4d377817e290cb5263a69a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/stackvm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/stackvm.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/stackvm.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5ede245953bbcb58054575cc08e6aab3c527adf Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/stackvm.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tar.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e3e02ae9819c7497bff231fbeab588feb253b76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tar.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tar.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tar.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..30a4736c2aa064eedadd91e64d6f439acfea161d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tar.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/thrust.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/thrust.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2173e22b97866de0c6e3beb894ea59bad0b9bf72 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/thrust.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/thrust.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/thrust.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a684fb0235253d427db640d435e00ac3dadde35b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/thrust.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tvmjs.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tvmjs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dde834953f68dad2c1dfd3623371e200c53336be Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/tvmjs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1362a025552577b86c1035fb67e8277e8e5c4645 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/utils.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..792f819a09246549f4274439227cd5547bc8de43 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/utils.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/xcode.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/xcode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2d4a30f69766ac61f3580a4e655093a71af00c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/xcode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/xcode.cpython-38.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/xcode.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..648e2fcf995ff382c5070f3872ae00e18f7aa5cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/__pycache__/xcode.cpython-38.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cblas.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cblas.py new file mode 100644 index 0000000000000000000000000000000000000000..1dfeb801b370df28490ceb137e911b6845e05a30 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cblas.py @@ -0,0 +1,93 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to BLAS libraries.""" +import tvm +from tvm import te + + +def matmul(lhs, rhs, transa=False, transb=False, **kwargs): + """Create an extern op that compute matrix mult of A and rhs with CrhsLAS + This function serves as an example on how to call external libraries. + + Parameters + ---------- + lhs: Tensor + The left matrix operand + rhs: Tensor + The right matrix operand + transa: bool + Whether transpose lhs + transb: bool + Whether transpose rhs + + Returns + ------- + C: Tensor + The result tensor. + """ + n = lhs.shape[1] if transa else lhs.shape[0] + m = rhs.shape[0] if transb else rhs.shape[1] + return te.extern( + (n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], transa, transb + ), + name="C", + **kwargs, + ) + + +def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs): + """Create an extern op that compute batched matrix mult of A and rhs with CBLAS + This function serves as an example on how to call external libraries. 
+ + Parameters + ---------- + lhs: Tensor + The left matrix operand + rhs: Tensor + The right matrix operand + transa: bool + Whether transpose lhs + transb: bool + Whether transpose rhs + + Returns + ------- + C: Tensor + The result tensor. + """ + b = te.max(lhs.shape[0], rhs.shape[0]) + n = lhs.shape[2] if transa else lhs.shape[1] + m = rhs.shape[1] if transb else rhs.shape[2] + return te.extern( + (b, n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cblas.batch_matmul" + if not iterative + else "tvm.contrib.cblas.batch_matmul_iterative", + ins[0], + ins[1], + outs[0], + transa, + transb, + ), + name="C", + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cc.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cc.py new file mode 100644 index 0000000000000000000000000000000000000000..59b57e08ba495cf63e0d8a50c9ce4726eee6857d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cc.py @@ -0,0 +1,415 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Util to invoke C/C++ compilers in the system.""" +import os +import shutil +import subprocess + +# pylint: disable=invalid-name +import sys +from typing import Dict + +from .._ffi.base import py_str +from . import tar as _tar +from . import utils as _utils + + +def _is_linux_like(): + return ( + sys.platform == "darwin" + or sys.platform.startswith("linux") + or sys.platform.startswith("freebsd") + ) + + +def _is_windows_like(): + return sys.platform == "win32" + + +def get_cc(): + """Return the path to the default C/C++ compiler. + + Returns + ------- + out: Optional[str] + The path to the default C/C++ compiler, or None if none was found. + """ + + if not _is_linux_like(): + return None + + env_cxx = os.environ.get("CXX") or os.environ.get("CC") + if env_cxx: + return env_cxx + cc_names = ["g++", "gcc", "clang++", "clang", "c++", "cc"] + dirs_in_path = os.get_exec_path() + for cc in cc_names: + for d in dirs_in_path: + cc_path = os.path.join(d, cc) + if os.path.isfile(cc_path) and os.access(cc_path, os.X_OK): + return cc_path + return None + + +def create_shared(output, objects, options=None, cc=None, cwd=None, ccache_env=None): + """Create shared library. + + Parameters + ---------- + output : str + The target shared library. + + objects : List[str] + List of object files. + + options : List[str] + The list of additional options string. + + cc : Optional[str] + The compiler command. + + cwd : Optional[str] + The current working directory. + + ccache_env : Optional[Dict[str, str]] + The environment variable for ccache. Set `None` to disable ccache by default. 
+ """ + cc = cc or get_cc() + + if _is_linux_like(): + _linux_compile(output, objects, options, cc, cwd, ccache_env, compile_shared=True) + elif _is_windows_like(): + _windows_compile(output, objects, options, cwd, ccache_env) + else: + raise ValueError("Unsupported platform") + + +def _linux_ar(output, inputs, ar): + ar = ar or "ar" + + libname = os.path.basename(output) + if not libname.startswith("lib"): + libname = "lib" + libname + temp = _utils.tempdir() + temp_output = temp.relpath(libname) + cmd = [ar, "-crs", temp_output] + + # handles the case where some input files are tar of objects + # unpack them and return the list of files inside + objects = _tar.normalize_file_list_by_unpacking_tars(temp, inputs) + + cmd += objects + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "AR error:\n" + msg += py_str(out) + msg += "\nCommand line: " + " ".join(cmd) + raise RuntimeError(msg) + + shutil.move(temp_output, output) + + +def create_staticlib(output, inputs, ar=None): + """Create static library. + + Parameters + ---------- + output : str + The target shared library. + + inputs : List[str] + List of inputs files. Each input file can be a tarball + of objects or an object file. + + ar : Optional[str] + Path to the ar command to be used + """ + + if _is_linux_like(): + return _linux_ar(output, inputs, ar) + else: + raise ValueError("Unsupported platform") + + +def create_executable(output, objects, options=None, cc=None, cwd=None, ccache_env=None): + """Create executable binary. + + Parameters + ---------- + output : str + The target executable. + + objects : List[str] + List of object files. + + options : List[str] + The list of additional options string. + + cc : Optional[str] + The compiler command. + + cwd : Optional[str] + The urrent working directory. + + ccache_env : Optional[Dict[str, str]] + The environment variable for ccache. 
Set `None` to disable ccache by default. + """ + cc = cc or get_cc() + + if _is_linux_like(): + _linux_compile(output, objects, options, cc, cwd, ccache_env) + elif _is_windows_like(): + _windows_compile(output, objects, options, cwd, ccache_env) + else: + raise ValueError("Unsupported platform") + + +def get_global_symbol_section_map(path, *, nm=None) -> Dict[str, str]: + """Get global symbols from a library via nm -g + + Parameters + ---------- + path : str + The library path + + nm: str + The path to nm command + + Returns + ------- + symbol_section_map: Dict[str, str] + A map from defined global symbol to their sections + """ + if nm is None: + if not _is_linux_like(): + raise ValueError("Unsupported platform") + nm = "nm" + + symbol_section_map = {} + + if not os.path.isfile(path): + raise FileNotFoundError(f"{path} does not exist") + + cmd = [nm, "-gU", path] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Runtime error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + for line in py_str(out).split("\n"): + data = line.strip().split() + if len(data) != 3: + continue + symbol = data[-1] + section = data[-2] + symbol_section_map[symbol] = section + return symbol_section_map + + +def get_target_by_dump_machine(compiler): + """Functor of get_target_triple that can get the target triple using compiler. + + Parameters + ---------- + compiler : Optional[str] + The compiler. + + Returns + ------- + out: Callable + A function that can get target triple according to dumpmachine option of compiler. 
+ """ + + def get_target_triple(): + """Get target triple according to dumpmachine option of compiler.""" + if compiler: + cmd = [compiler, "-dumpmachine"] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "dumpmachine error:\n" + msg += py_str(out) + return None + return py_str(out) + return None + + return get_target_triple + + +# assign so as default output format +create_shared.output_format = "so" if sys.platform != "win32" else "dll" +create_shared.get_target_triple = get_target_by_dump_machine(os.environ.get("CXX", get_cc())) + + +def cross_compiler( + compile_func, options=None, output_format=None, get_target_triple=None, add_files=None +): + """Create a cross compiler function by specializing compile_func with options. + + This function can be used to construct compile functions that + can be passed to AutoTVM measure or export_library. + + + Parameters + ---------- + compile_func : Union[str, Callable[[str, str, Optional[str]], None]] + Function that performs the actual compilation + + options : Optional[List[str]] + List of additional optional string. + + output_format : Optional[str] + Library output format. + + get_target_triple: Optional[Callable] + Function that can target triple according to dumpmachine option of compiler. + + add_files: Optional[List[str]] + List of paths to additional object, source, library files + to pass as part of the compilation. + + Returns + ------- + fcompile : Callable[[str, str, Optional[str]], None] + A compilation function that can be passed to export_library. + + Examples + -------- + .. code-block:: python + + from tvm.contrib import cc, ndk + # export using arm gcc + mod = build_runtime_module() + mod.export_library(path_dso, + fcompile=cc.cross_compiler("arm-linux-gnueabihf-gcc")) + # specialize ndk compilation options. 
+ specialized_ndk = cc.cross_compiler( + ndk.create_shared, + ["--sysroot=/path/to/sysroot", "-shared", "-fPIC", "-lm"]) + mod.export_library(path_dso, fcompile=specialized_ndk) + """ + base_options = [] if options is None else options + kwargs = {} + add_files = [] if add_files is None else add_files + + # handle case where compile_func is the name of the cc + if isinstance(compile_func, str): + kwargs = {"cc": compile_func} + compile_func = create_shared + + def _fcompile(outputs, objects, options=None): + all_options = base_options + if options is not None: + all_options += options + compile_func(outputs, objects + add_files, options=all_options, **kwargs) + + if not output_format and hasattr(compile_func, "output_format"): + output_format = compile_func.output_format + output_format = output_format if output_format else "so" + + if not get_target_triple and hasattr(compile_func, "get_target_triple"): + get_target_triple = compile_func.get_target_triple + + _fcompile.output_format = output_format + _fcompile.get_target_triple = get_target_triple + return _fcompile + + +def _linux_compile( + output, objects, options, compile_cmd, cwd=None, ccache_env=None, compile_shared=False +): + cmd = [compile_cmd] + if compile_cmd != "nvcc": + if compile_shared or output.endswith(".so") or output.endswith(".dylib"): + cmd += ["-shared", "-fPIC"] + if sys.platform == "darwin": + cmd += ["-undefined", "dynamic_lookup"] + elif output.endswith(".obj"): + cmd += ["-c"] + else: + if compile_shared or output.endswith(".so") or output.endswith(".dylib"): + cmd += ["-shared"] + cmd += ["-o", output] + if isinstance(objects, str): + cmd += [objects] + else: + cmd += objects + if options: + cmd += options + env = None + if ccache_env is not None: + if shutil.which("ccache"): + cmd.insert(0, "ccache") + env = os.environ.copy() + env.update(ccache_env) + else: + raise ValueError("ccache not found") + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd, 
env=env) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "Compilation error:\n" + msg += py_str(out) + msg += "\nCommand line: " + " ".join(cmd) + raise RuntimeError(msg) + + +def _windows_compile(output, objects, options, cwd=None, ccache_env=None): + cmd = ["clang"] + cmd += ["-O2"] + + if output.endswith(".so") or output.endswith(".dll"): + cmd += ["-shared"] + elif output.endswith(".obj"): + cmd += ["-c"] + + if isinstance(objects, str): + objects = [objects] + cmd += ["-o", output] + cmd += objects + if options: + cmd += options + env = None + if ccache_env is not None: + if shutil.which("ccache"): + cmd.insert(0, "ccache") + env = os.environ.copy() + env.update(ccache_env) + else: + raise ValueError("ccache not found") + + try: + proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd, env=env + ) + (out, _) = proc.communicate() + except FileNotFoundError: + raise RuntimeError( + "Can not find the LLVM clang for Windows clang.exe)." + "Make sure it's installed" + " and the installation directory is in the %PATH% environment " + "variable. Prebuilt binaries can be found at: https://llvm.org/" + ) + if proc.returncode != 0: + msg = "Compilation error:\n" + msg += " ".join(cmd) + "\n" + msg += py_str(out) + + raise RuntimeError(msg) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/clang.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/clang.py new file mode 100644 index 0000000000000000000000000000000000000000..16c465dc22ab5d158b6545715051229df2647b97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/clang.py @@ -0,0 +1,109 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Util to invoke clang in the system.""" +# pylint: disable=invalid-name +import subprocess + +from tvm._ffi.base import py_str +import tvm.target +from . import utils + + +def find_clang(required=True): + """Find clang in system. + + Parameters + ---------- + required : bool + Whether it is required, + runtime error will be raised if the compiler is required. + + Returns + ------- + valid_list : list of str + List of possible paths. + + Note + ---- + This function will first search clang that + matches the major llvm version that built with tvm + """ + cc_list = [] + major = tvm.target.codegen.llvm_version_major(allow_none=True) + if major is not None: + cc_list += [f"clang-{major}.0"] + cc_list += [f"clang-{major}"] + cc_list += ["clang"] + cc_list += ["clang.exe"] + valid_list = [utils.which(x) for x in cc_list] + valid_list = [x for x in valid_list if x] + if not valid_list and required: + raise RuntimeError("cannot find clang, candidates are: " + str(cc_list)) + return valid_list + + +def create_llvm(inputs, output=None, options=None, cc=None): + """Create llvm text ir. + + Parameters + ---------- + inputs : list of str + List of input files name or code source. + + output : str, optional + Output file, if it is none + a temporary file is created + + options : list + The list of additional options string. 
+ + cc : str, optional + The clang compiler, if not specified, + we will try to guess the matched clang version. + + Returns + ------- + code : str + The generated llvm text IR. + """ + cc = cc if cc else find_clang()[0] + cmd = [cc] + cmd += ["-S", "-emit-llvm"] + temp = utils.tempdir() + output = output if output else temp.relpath("output.ll") + inputs = [inputs] if isinstance(inputs, str) else inputs + input_files = [] + for i, code in enumerate(inputs): + if utils.is_source_path(code): + input_files.append(code) + else: + temp_path = temp.relpath(f"input{i}.cc") + with open(temp_path, "w") as output_file: + output_file.write(code) + input_files.append(temp_path) + if options: + cmd += options + cmd += ["-o", output] + cmd += input_files + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "Compilation error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + return open(output).read() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/coreml_runtime.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/coreml_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..aa4f21279967f18fe65a65a82f3f7403a0b6a4fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/coreml_runtime.py @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""CoreML runtime that load and run coreml models.""" +import tvm._ffi +from ..rpc import base as rpc_base + + +def create(symbol, compiled_model_path, device): + """Create a runtime executor module given a coreml model and context. + Parameters + ---------- + symbol : str + The symbol that represents the Core ML model. + compiled_model_path : str + The path of the compiled model to be deployed. + device : Device + The device to deploy the module. It can be local or remote when there + is only one Device. + Returns + ------- + coreml_runtime : CoreMLModule + Runtime coreml module that can be used to execute the coreml model. + """ + device_type = device.device_type + runtime_func = "tvm.coreml_runtime.create" + + if device_type >= rpc_base.RPC_SESS_MASK: + fcreate = device._rpc_sess.get_function(runtime_func) + else: + fcreate = tvm._ffi.get_global_func(runtime_func) + assert fcreate, "Cannot find `tvm.coreml_runtime.create` function." + + return CoreMLModule(fcreate(symbol, compiled_model_path)) + + +class CoreMLModule(object): + """Wrapper runtime module. + + This is a thin wrapper of the underlying TVM module. + you can also directly call set_input, run, and get_output + of underlying module functions + + Parameters + ---------- + module : Module + The internal tvm module that holds the actual coreml functions. + + Attributes + ---------- + module : Module + The internal tvm module that holds the actual coreml functions. 
+ """ + + def __init__(self, module): + self.module = module + self.invoke = module["invoke"] + self.set_input = module["set_input"] + self.get_output = module["get_output"] + self.get_num_outputs = module["get_num_outputs"] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cublas.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cublas.py new file mode 100644 index 0000000000000000000000000000000000000000..e01b09c3e4ee12fddc8a43a6c0a2511afcd32182 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cublas.py @@ -0,0 +1,86 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to cuBLAS libraries.""" +import tvm +from tvm import te + + +def matmul(lhs, rhs, transa=False, transb=False, dtype=None): + """Create an extern op that compute matrix mult of A and rhs with cuBLAS + + Parameters + ---------- + lhs : Tensor + The left matrix operand + rhs : Tensor + The right matrix operand + transa : bool + Whether transpose lhs + transb : bool + Whether transpose rhs + + Returns + ------- + C : Tensor + The result tensor. 
+ """ + n = lhs.shape[1] if transa else lhs.shape[0] + m = rhs.shape[0] if transb else rhs.shape[1] + dtype = dtype if dtype is not None else lhs.dtype + return te.extern( + (n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cublas.matmul", ins[0], ins[1], outs[0], transa, transb + ), + dtype=dtype, + name="matmul_cublas", + ) + + +def batch_matmul(lhs, rhs, transa=False, transb=False, dtype=None): + """Create an extern op that compute batch matrix mult of A and rhs with cuBLAS + + Parameters + ---------- + lhs : Tensor + The left matrix operand + rhs : Tensor + The right matrix operand + transa : bool + Whether transpose lhs + transb : bool + Whether transpose rhs + + Returns + ------- + C : Tensor + The result tensor. + """ + b = lhs.shape[0] + n = lhs.shape[2] if transa else lhs.shape[1] + m = rhs.shape[1] if transb else rhs.shape[2] + dtype = dtype if dtype is not None else lhs.dtype + return te.extern( + (b, n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cublas.batch_matmul", ins[0], ins[1], outs[0], transa, transb + ), + dtype=dtype, + name="batch_matmul_cublas", + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cublaslt.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cublaslt.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9fe7c7b561af1954ad1ba280b7402cfd816d26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cublaslt.py @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to cuBLASlt libraries.""" +import tvm +from tvm import te + + +def matmul(lhs, rhs, transa=False, transb=False, n=0, m=0, dtype=None): + """Create an extern op that compute matrix mult of A and rhs with cuBLAS + + Parameters + ---------- + lhs : Tensor + The left matrix operand + rhs : Tensor + The right matrix operand + transa : bool + Whether transpose lhs + transb : bool + Whether transpose rhs + + Returns + ------- + C : Tensor + The result tensor. + """ + if n == 0: + n = lhs.shape[1] if transa else lhs.shape[0] + if m == 0: + m = rhs.shape[0] if transb else rhs.shape[1] + dtype = dtype if dtype is not None else lhs.dtype + return te.extern( + (n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cublaslt.matmul", ins[0], ins[1], outs[0], transa, transb + ), + dtype=dtype, + name="C", + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13a83393a9124bf6ec36540556b4808abd47e206 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e14d1c430e79bba1c22d036d70a199c7a6537413 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__pycache__/cuda_graph_executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__pycache__/cuda_graph_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db45b716d9c4e42106de8e1c0ee4119d6465a315 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/__pycache__/cuda_graph_executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/cuda_graph_executor.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/cuda_graph_executor.py new file mode 100644 index 
0000000000000000000000000000000000000000..d047316eb5645fe372fc83fa83dbc29b72a820d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cuda_graph/cuda_graph_executor.py @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Graph executor with CUDA Graph""" +import tvm._ffi + +from tvm._ffi.base import string_types +from tvm.contrib import graph_executor + + +def create(graph_json_str, libmod, device): + """Create a runtime executor module given a graph and module. + + Parameters + ---------- + graph_json_str : str + The graph to be deployed in json format output by json graph. + The graph can contain operator(tvm_op) that points to the name + of PackedFunc in the libmod. + + libmod : tvm.runtime.Module + The module of the corresponding function + + device : Device + The device to deploy the module, only supports CUDA GPU + + Returns + ------- + graph_module : GraphModuleCudaGraph + CUDA graph executor module that can be used to execute the graph. + + Note + ---- + See also :py:class:`tvm.contrib.cuda_graph.cuda_graph_executor.GraphModuleCudaGraph` + for examples to directly construct a GraphModuleCudaGraph from an exported + relay compiled library. 
+ """ + assert isinstance(graph_json_str, string_types) + try: + dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device) + if num_rpc_dev == len(dev): + fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor_cuda_graph.create") + else: + fcreate = tvm._ffi.get_global_func("tvm.graph_executor_cuda_graph.create") + except ValueError: + raise ValueError( + "To enable CUDA graph support (experimental), please set " + "'(USE_GRAPH_EXECUTOR_CUGRAPH ON)' in config.cmake and rebuild TVM" + ) + + return GraphModuleCudaGraph(fcreate(graph_json_str, libmod, *device_type_id)) + + +class GraphModuleCudaGraph(graph_executor.GraphModule): + """CUDA graph executor module. + + This is a CUDA graph executor wrapper over the TVM runtime. + Runtime interfaces are wrapped with CUDA graph functionalities. + + Parameters + ---------- + module : Module + The internal tvm module that holds the actual graph functions. + """ + + def __init__(self, module): + self._start_capture = module["start_capture"] + self._end_capture = module["end_capture"] + self._run_cuda_graph = module["run_cuda_graph"] + self._cuda_graph_captured = False + graph_executor.GraphModule.__init__(self, module) + + def capture_cuda_graph(self): + """Capture a CUDA graph for tvm_op graph + + This should be called before run_cuda_graph() to capture and + instantiate a CUDA graph instance. 
+ """ + self._run() # call cuModuleLoadData before cudaStream API + self._start_capture() + self._run() + self._end_capture() + self._cuda_graph_captured = True + + def run_cuda_graph(self): + """Run the CUDA graph for tvm_op graph + + Run the captured CUDA graph instance instead of the + for-loop kernel launch of default graph executor + """ + self._run_cuda_graph() + + def run(self, **input_dict): + """A run wrapper for graph capture / launch, user can just + change default graph executor to cuda graph executor, and + the first call will capture a cuda graph for future launch + + Parameters + ---------- + input_dict: dict of str to NDArray + List of input values to be feed to + """ + if input_dict: + self.set_input(**input_dict) + if not self._cuda_graph_captured: + self.capture_cuda_graph() + else: + self._run_cuda_graph() + + def debug_get_output(self, node, out): + """Run graph up to node and get the output to out + + Parameters + ---------- + node : int / str + The node index or name + + out : NDArray + The output array container + """ + raise NotImplementedError("Please use debugger.debug_executor as graph_executor instead.") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cudnn.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cudnn.py new file mode 100644 index 0000000000000000000000000000000000000000..9d3f80a5c74b121bc4997c492a5174a79b9f9504 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cudnn.py @@ -0,0 +1,953 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to CuDNN v7 library.""" +# pylint: disable-msg=C0103 +import ctypes +import numpy as np +import tvm + +import tvm._ffi +from tvm import te + +# algos can be read from cudnn.h +_FWD_ALGOS = [ + "CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM", + "CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM", + "CUDNN_CONVOLUTION_FWD_ALGO_GEMM", + "CUDNN_CONVOLUTION_FWD_ALGO_DIRECT", + "CUDNN_CONVOLUTION_FWD_ALGO_FFT", + "CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING", + "CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD", + "CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED", + "CUDNN_CONVOLUTION_FWD_ALGO_COUNT", +] + + +def exists(): + """ + Checks whether the local machine can use CuDNN. + + Returns + ------- + exists: bool + + True if CuDNN support is enabled and a CuDNN-capable GPU + exists. Otherwise, False. 
+ """ + func = tvm.get_global_func("tvm.contrib.cudnn.exists", allow_missing=True) + if func is None: + return False + + return bool(func()) + + +def algo_to_index(algo_type, algo_name): + """Return a index represents the algorithm, which can be used in + calling CuDNN function + + Parameters + ---------- + algo_type : str + ["fwd", "bwd_filter", "bwd_data] + + algo_name : str + algorithm name in cudnn definition + fwd = [ + "CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM", + "CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM", + "CUDNN_CONVOLUTION_FWD_ALGO_GEMM", + "CUDNN_CONVOLUTION_FWD_ALGO_DIRECT", + "CUDNN_CONVOLUTION_FWD_ALGO_FFT", + "CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING", + "CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD", + "CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED", + "CUDNN_CONVOLUTION_FWD_ALGO_COUNT", + ] + bwd_filter = [ + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0", + # non-deterministic + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1", + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT", + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3", + # non-deterministic, algo0 with workspaceS + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD", + # not implemented + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED", + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING", + "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT", + ] + bwd_data = [ + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_0", + # non-deterministic + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_1", + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT", + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING", + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD", + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED", + "CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT", + ] + + Returns + ------- + algo: int + algorithm index + + """ + idx = -1 + if algo_type == "fwd": + idx = _FWD_ALGOS.index(algo_name) + elif algo_type == "bwd_filter": + idx = _BWD_FILTER_ALGOS.index(algo_name) + elif algo_type == "bwd_data": + idx = _BWD_DATA_ALGOS.index(algo_name) + assert idx >= 0 + return idx + + +def _get_np_int32_array_handle(arr): + 
"""Return a void_p handle for a numpy array + + Parameters + ---------- + arr: numpy.NDArray + source numpy array + + Returns + ------- + ptr: ctypes.c_void_p + pointer to the data + """ + assert arr.dtype == np.int32 + ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) + return ctypes.cast(ptr, ctypes.c_void_p) + + +def _prepare_global_func_params(dims, pad, stride, dilation, x_shape=None, w_shape=None): + full_dims = dims + 2 + if x_shape: + assert isinstance(x_shape, list) + assert len(x_shape) == full_dims + if w_shape: + assert isinstance(w_shape, list) + assert len(w_shape) == full_dims + + pad = ( + np.full(dims, pad, dtype=np.int32) + if isinstance(pad, int) + else np.array(pad, dtype=np.int32) + ) + stride = ( + np.full(dims, stride, dtype=np.int32) + if isinstance(stride, int) + else np.array(stride, dtype=np.int32) + ) + dilation = ( + np.full(dims, dilation, dtype=np.int32) + if isinstance(dilation, int) + else np.array(dilation, dtype=np.int32) + ) + + xshape = np.array(x_shape, dtype=np.int32) if x_shape else None + wshape = np.array(w_shape, dtype=np.int32) if x_shape else None + + return pad, stride, dilation, xshape, wshape + + +def conv_output_shape( + tensor_format, pad, stride, dilation, x_shape, w_shape, data_dtype, conv_dtype, groups=1 +): + """Get output shape of 2D or 3D convolution + + Paramters + --------- + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + 2: CUDNN_TENSOR_NCHW_VECT_C + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + x_shape: list + input shape + w_shape: list + weight shape + data_dtype: str + data type + conv_dtype: str + convolution type + groups: int + number of groups + + Returns + ------- + oshape: list + output shape + """ + + assert len(x_shape) == len(w_shape) + assert len(x_shape) in (4, 5) + + if tensor_format == 0: + n_output = x_shape[0] + c_output = w_shape[0] + x_chan = x_shape[1] + w_chan_input = w_shape[1] + x_shape = x_shape[2:] + 
w_shape = w_shape[2:] + + elif tensor_format == 1: + n_output = x_shape[0] + c_output = w_shape[0] + x_chan = x_shape[-1] + w_chan_input = w_shape[-1] + assert len(x_shape) == 4, "CuDNN layout NHWC is only well-defined for 4d tensors" + x_shape = x_shape[1:-1] + w_shape = w_shape[1:-1] + + elif tensor_format == 2: + n_output = x_shape[0] + c_output = w_shape[0] + x_chan = x_shape[1] + w_chan_input = w_shape[1] + w_lanes = tvm.runtime.DataType(conv_dtype).lanes + assert w_lanes == 1 + x_shape = x_shape[2:] + w_shape = w_shape[2:] + + else: + raise ValueError(f"Unknown CuDNN tensor format: '{tensor_format}'") + + x_lanes = tvm.runtime.DataType(data_dtype).lanes + assert x_chan * x_lanes == w_chan_input * groups, ( + "Mismatched dimensions, data has {} channels/group " + "(dimension {} with {} lanes/value, {} groups), " + "but weights require {} input channels/group" + ).format(x_chan // groups, x_chan, x_lanes, groups, w_chan_input) + + output_dims = [] + for x_shape_i, w_shape_i, pad_i, stride_i, dilation_i in zip( + x_shape, w_shape, pad, stride, dilation + ): + output_dim = 1 + (x_shape_i + 2 * pad_i - (((w_shape_i - 1) * dilation_i) + 1)) // stride_i + output_dims.append(output_dim) + + if tensor_format in [0, 2]: + output = [n_output, c_output, *output_dims] + elif tensor_format == 1: + output = [n_output, *output_dims, c_output] + else: + raise ValueError(f"Unknown CuDNN tensor format: '{tensor_format}'") + + return output + + +def conv_dgrad_shape( + tensor_format, pad, stride, dilation, dy_shape, w_shape, output_padding=(0, 0), groups=1 +): + """Get output shape of conv2d gradient with respect to data + + Paramters + --------- + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + dy_shape: list + output gradient shape + w_shape: list + weight shape + data_dtype: str + data type + conv_dtype: str + convolution type + groups: int + number of groups + + 
Returns + ------- + oshape: list + output shape + """ + + assert len(dy_shape) == len(w_shape) + assert len(dy_shape) == 4 + + if tensor_format == 0: + N = dy_shape[0] + C = w_shape[1] * groups + dy_shape = dy_shape[2:] + w_shape = w_shape[2:] + elif tensor_format == 1: + N = dy_shape[0] + C = w_shape[-1] * groups + dy_shape = dy_shape[1:-1] + w_shape = w_shape[1:-1] + else: + raise ValueError(f"Unsupported CuDNN tensor format: '{tensor_format}'") + + input_dims = [] + for dy_shape_i, w_shape_i, pad_i, stride_i, dilation_i, out_pad in zip( + dy_shape, w_shape, pad, stride, dilation, output_padding + ): + input_dim = ( + (dy_shape_i - 1) * stride_i - 2 * pad_i + (((w_shape_i - 1) * dilation_i) + 1) + out_pad + ) + input_dims.append(input_dim) + + if tensor_format == 0: + output = [N, C, *input_dims] + else: + output = [N, *input_dims, C] + + return output + + +def _conv_find_algo( + func_name, + tensor_format, + pad, + stride, + dilation, + x_shape, + w_shape, + y_shape, + data_dtype, + conv_dtype, + groups=1, + verbose=False, +): + """ + Common function to choose the best cudnn convolution algorithm for the given input + and the convolution type. + """ + dims = len(x_shape) + assert dims in (4, 5) + + pad, stride, dilation, xshape, wshape = _prepare_global_func_params( + dims - 2, pad, stride, dilation, x_shape, w_shape + ) + yshape = np.array(y_shape, dtype=np.int32) + func = tvm._ffi.get_global_func(func_name) + return func( + tensor_format, + dims - 2, + _get_np_int32_array_handle(pad), + _get_np_int32_array_handle(stride), + _get_np_int32_array_handle(dilation), + _get_np_int32_array_handle(xshape), + _get_np_int32_array_handle(wshape), + _get_np_int32_array_handle(yshape), + data_dtype, + conv_dtype, + groups, + verbose, + ) + + +def conv_forward_find_algo( + tensor_format, + pad, + stride, + dilation, + x_shape, + w_shape, + y_shape, + data_dtype, + conv_dtype, + groups=1, + verbose=True, +): + """Choose the best forward algorithm for the given input. 
+ + Paramters + --------- + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + 2: CUDNN_TENSOR_NCHW_VECT_C + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + x_shape: list + input shape + w_shape: list + weight shape + y_shape: list + output shape + data_dtype: str + data type + conv_dtype: str + convolution type + groups: int + number of groups + + Returns + ------- + algo: int + algo chosen by CUDNN + """ + return _conv_find_algo( + "tvm.contrib.cudnn.conv.forward_find_algo", + tensor_format, + pad, + stride, + dilation, + x_shape, + w_shape, + y_shape, + data_dtype, + conv_dtype, + groups, + verbose, + ) + + +def conv_backward_data_find_algo( + tensor_format, + pad, + stride, + dilation, + dy_shape, + w_shape, + dx_shape, + data_dtype, + conv_dtype, + groups=1, + verbose=True, +): + """Choose the best backward data algorithm for the given input. + + Paramters + --------- + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + 2: CUDNN_TENSOR_NCHW_VECT_C + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + dy_shape: list + output gradient shape + w_shape: list + weight shape + dx_shape: list + dgrad shape + data_dtype: str + data type + conv_dtype: str + convolution type + groups: int + number of groups + verbose: bool + whether to show the selection trials + + Returns + ------- + algo: int + algo chosen by CUDNN + """ + return _conv_find_algo( + "tvm.contrib.cudnn.conv.backward_data_find_algo", + tensor_format, + pad, + stride, + dilation, + dy_shape, + w_shape, + dx_shape, + data_dtype, + conv_dtype, + groups, + verbose, + ) + + +def conv_backward_filter_find_algo( + tensor_format, + pad, + stride, + dilation, + dy_shape, + x_shape, + dw_shape, + data_dtype, + conv_dtype, + groups=1, + verbose=True, +): + """Choose the best backward filter algorithm for the given input. 
+ + Paramters + --------- + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + 2: CUDNN_TENSOR_NCHW_VECT_C + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + dy_shape: list + output gradient shape + x_shape: list + weight shape + dw_shape: list + wgrad shape + data_dtype: str + data type + conv_dtype: str + convolution type + groups: int + number of groups + verbose: bool + whether to show the selection trials + + Returns + ------- + algo: int + algo chosen by CUDNN + """ + return _conv_find_algo( + "tvm.contrib.cudnn.conv.backward_filter_find_algo", + tensor_format, + pad, + stride, + dilation, + dy_shape, + x_shape, + dw_shape, + data_dtype, + conv_dtype, + groups, + verbose, + ) + + +def conv_forward( + x, w, pad, stride, dilation, conv_mode, tensor_format, algo, conv_dtype, groups=1, verbose=True +): + """Create an extern op that compute 2D or 3D convolution with CuDNN + + Parameters + ---------- + x: Tensor + input feature map + w: Tensor + convolution weight + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + conv_mode: int + 0: CUDNN_CONVOLUTION + 1: CUDNN_CROSS_CORRELATION + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + 2: CUDNN_TENSOR_NCHW_VECT_C + algo: int + Forward algorithm, get index from ```algo_to_index``` function + if algo == -1, the best algo will be chosen by CUDNN + conv_dtype: str + convolution type + groups: int + the number of groups + verbose: bool + whether to show the selection trials + + Returns + ------- + y: Tensor + The result tensor + """ + dims = len(x.shape) + assert dims in (4, 5) + + conv_dtype = x.dtype if conv_dtype is None else conv_dtype + pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation) + + x_shape = list(x.shape) + + if isinstance(x.shape[0], tvm.tir.expr.IntImm): + oshape = conv_output_shape( + tensor_format, + pad, + stride, + dilation, + x_shape, + 
list(w.shape), + x.dtype, + conv_dtype, + groups, + ) + if algo == -1: + # For now if we try to call `cudnnFindConvolutionForwardAlgorithm` when + # using INT8 data type, CuDNN will crash down. + # On the other hand, CuDNN only support IMPLICIT_PRECOMP_GEMM at NHWC format + if tensor_format == 1 and conv_dtype == "int32": + algo = 1 + else: + algo = conv_forward_find_algo( + tensor_format, + pad, + stride, + dilation, + list(x.shape), + list(w.shape), + oshape, + x.dtype, + conv_dtype, + groups, + verbose, + ) + else: + # The dynamic batch size case, pretend this is a single batch + x_shape[0] = 1 + oshape = conv_output_shape( + tensor_format, + pad, + stride, + dilation, + x_shape, + list(w.shape), + x.dtype, + conv_dtype, + groups, + ) + oshape[0] = x.shape[0] + # This picks CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM + # It seems this is the fastest among algorithms that are always applicable + algo = 1 + + if dims == 4: + return te.extern( + oshape, + [x, w], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cudnn.conv2d.forward", + conv_mode, + tensor_format, + algo, + pad[0], + pad[1], + stride[0], + stride[1], + dilation[0], + dilation[1], + ins[0], + ins[1], + outs[0], + conv_dtype, + groups, + ), + name="y", + ) + + return te.extern( + oshape, + [x, w], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cudnn.conv3d.forward", + conv_mode, + tensor_format, + algo, + pad[0], + pad[1], + pad[2], + stride[0], + stride[1], + stride[2], + dilation[0], + dilation[1], + dilation[2], + ins[0], + ins[1], + outs[0], + conv_dtype, + groups, + ), + name="y", + ) + + +def conv_backward_data( + dy, + w, + pad, + stride, + dilation, + conv_mode, + tensor_format, + conv_dtype, + groups=1, + output_padding=(0, 0), +): + """Create a CuDNN extern op that computes the gradient of 2D convolution with respect to data. 
+ + Parameters + ---------- + dy: Tensor + output gradient + w: Tensor + convolution weight + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + conv_mode: int + 0: CUDNN_CONVOLUTION + 1: CUDNN_CROSS_CORRELATION + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + conv_dtype: str + convolution type + groups: int + the number of groups + + Returns + ------- + dx: Tensor + dgrad tensor + """ + dims = len(dy.shape) + assert dims == 4 + + conv_dtype = dy.dtype if conv_dtype is None else conv_dtype + pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation) + + assert isinstance( + dy.shape[0], tvm.tir.expr.IntImm + ), "Dynamic batch is not supported for cudnn conv2d backwad data yet." + + dx_shape = conv_dgrad_shape( + tensor_format, pad, stride, dilation, dy.shape, w.shape, output_padding, groups + ) + + if exists(): + # When cudnn exists, find the backward data algo + algo = conv_backward_data_find_algo( + tensor_format, + pad, + stride, + dilation, + list(dy.shape), + list(w.shape), + dx_shape, + dy.dtype, + conv_dtype, + groups, + True, + ) + else: + algo = 1 + + return te.extern( + dx_shape, + [dy, w], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cudnn.conv2d.backward_data", + conv_mode, + tensor_format, + algo, + pad[0], + pad[1], + stride[0], + stride[1], + dilation[0], + dilation[1], + ins[0], + ins[1], + outs[0], + conv_dtype, + groups, + ), + name="dx", + ) + + +def conv_backward_filter( + dy, x, kernel_size, pad, stride, dilation, conv_mode, tensor_format, conv_dtype, groups=1 +): + """Create a CuDNN extern op that computes the gradient of 2D convolution with respect to weight. 
+ + Parameters + ---------- + dy: Tensor + output gradient + x: Tensor + input tensor + kernel_size: a pair of int + The spatial size of the corresponding forward convolution kernel + pad: int or list + padding + stride: int or list + stride + dilation: int or list + dilation + conv_mode: int + 0: CUDNN_CONVOLUTION + 1: CUDNN_CROSS_CORRELATION + tensor_format: int + 0: CUDNN_TENSOR_NCHW + 1: CUDNN_TENSOR_NHWC + conv_dtype: str + convolution type + groups: int + the number of groups + + Returns + ------- + dw: Tensor + wgrad tensor + """ + dims = len(x.shape) + assert dims == 4 + + conv_dtype = x.dtype if conv_dtype is None else conv_dtype + pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation) + filter_h, filter_w = kernel_size + + x_shape = list(x.shape) + + assert isinstance( + x.shape[0], tvm.tir.expr.IntImm + ), "Dynamic batch is not supported for cudnn conv2d backwad filter yet." + + ic_ind = 1 if tensor_format == 0 else 3 + + if groups > 1: + assert ( + x_shape[ic_ind] == dy.shape[ic_ind] and x_shape[ic_ind] == groups + ), "Only depthwise wgrad supported for groups > 1." 
+ ic = 1 + else: + ic = x_shape[ic_ind] + + if tensor_format == 0: + dw_shape = [dy.shape[1], ic, filter_h, filter_w] + else: + dw_shape = [dy.shape[3], filter_h, filter_w, ic] + + algo = conv_backward_filter_find_algo( + tensor_format, + pad, + stride, + dilation, + list(dy.shape), + list(x.shape), + dw_shape, + x.dtype, + conv_dtype, + groups, + True, + ) + + return te.extern( + dw_shape, + [dy, x], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cudnn.conv2d.backward_filter", + conv_mode, + tensor_format, + algo, + pad[0], + pad[1], + stride[0], + stride[1], + dilation[0], + dilation[1], + ins[0], + ins[1], + outs[0], + conv_dtype, + groups, + ), + name="dw", + ) + + +def softmax(x, axis=-1): + """Compute softmax using CuDNN + + Parameters + ---------- + x : tvm.te.Tensor + The input tensor + + axis : int + The axis to compute the softmax + + Returns + ------- + ret : tvm.te.Tensor + The result tensor + """ + return te.extern( + x.shape, + [x], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cudnn.softmax.forward", ins[0], outs[0], axis + ), + name="y", + ) + + +def log_softmax(x, axis=-1): + """Compute log_softmax using CuDNN + + Parameters + ---------- + x : tvm.te.Tensor + The input tensor + + axis : int + The axis to compute log softmax over + + Returns + ------- + ret : tvm.te.Tensor + The result tensor + """ + return te.extern( + x.shape, + [x], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.cudnn.log_softmax.forward", ins[0], outs[0], axis + ), + name="y", + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4b56ac4e164ad89850c57423619941e17b4da476 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__init__.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software 
Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""BYOC support for CUTLASS.""" +from .build import has_cutlass, num_cutlass_partitions, finalize_modules, finalize_modules_vm diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c897c729a5e4be123396686cf46e610a5493474 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/_ffi_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/_ffi_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58d3c63a1a6a28f600702912c474bb523f0433e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/_ffi_api.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/attention_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/attention_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..341be2b77a35a65faf93fd133c0ca77ba0c83474 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/attention_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/build.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..355600632a113fd6b1ae482f5a0c25f2f749b9d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/build.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/conv2d_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/conv2d_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a83b46cc819faa5508233aeda2d2df02540b211b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/conv2d_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/conv2d_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/conv2d_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d65717b7cab6615dcac04d369615837e4997d872 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/conv2d_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gemm_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gemm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49364493386187309803876daf672b7e57b3f618 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gemm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gemm_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gemm_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19df935d001f822eff1acb71a7fb2ffcfac08303 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gemm_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_conv2d.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_conv2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b6011f55a41153be53b5f9ddfb39a2f4558e4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_conv2d.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_gemm.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_gemm.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..1afa818672dbd310e9ec85a22acaf7f244937ea1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_gemm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_tensor_op.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_tensor_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3553392fb0d9523e0e7c1bab390db78ea3e26134 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/gen_tensor_op.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/layer_norm_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/layer_norm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea871351a30e2ab99ec6cc8d27c3a300fbaef219 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/layer_norm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/library.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/library.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6705d98435a1fe8967d44a7cb54230b23a0c4626 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/library.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/rms_norm_operation.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/rms_norm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b0b01665c5f96b2c5ebf54a7a975c44cdbdc184 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/__pycache__/rms_norm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/_ffi_api.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/_ffi_api.py new file mode 100644 index 0000000000000000000000000000000000000000..e71eb8c13f19f73acb252e3c0778b5455cb3086e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/_ffi_api.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""FFI API for CUTLASS BYOC.""" +import tvm._ffi + +tvm._ffi._init_api("contrib.cutlass", __name__) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/attention_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/attention_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..518778ec52edb82ca02b404a87bd543e77db7bca --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/attention_operation.py @@ -0,0 +1,333 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name +"""Generator for CUTLASS attention kernels.""" +from .library import substitute_template + + +def instantiate_attention_template(attrs): + """Return CUTLASS host code for fused multi head attention + based on a template and the provided attribute map.""" + + bias_template = """ + CHECK(${bias}->ndim == 4); // B, N, S, S' + + p.attn_bias_ptr = reinterpret_cast(${bias}->data); + p.bias_strideM = ${bias_strideM}; + p.bias_strideH = ${bias_strideH}; + p.bias_strideB = ${bias_strideB}; +""" + + var_len_template = """ + p.seqstart_q_ptr = (int32_t*)${seqstart_q}->data; + p.seqstart_k_ptr = (int32_t*)${seqstart_k}->data; + p.num_queries = ((int32_t*)${max_seqlen_q}->data)[0]; + p.num_batches = ${seqstart_q}->shape[0] - 1; +""" + + qkv_template = { + "default": """ + p.query_ptr = reinterpret_cast(${query}->data); + p.key_ptr = reinterpret_cast(${key}->data); + p.value_ptr = reinterpret_cast(${value}->data); + CHECK(${query}->ndim == 4); // B, S, N, H + CHECK(${key}->ndim == 4); // B, S', N, H + CHECK(${value}->ndim == 4); // B, S', N, H' + + // stride for N + p.q_strideH = p.head_dim; // H + p.k_strideH = p.head_dim; // H + p.v_strideH = p.head_dim_value; // H' + + // stride for S + p.q_strideM = p.q_strideH * p.num_heads; // H * N + p.k_strideM = p.k_strideH * p.num_heads; // H * N + p.v_strideM = p.v_strideH * p.num_heads; // H' * N + + // stride for B + p.q_strideB = p.q_strideM * p.num_queries; // H * N * S + p.k_strideB = p.k_strideM * p.num_keys; // H * N * S' + p.v_strideB = p.v_strideM * p.num_keys; // H'* N * S' +""", + "qkv_stacked": """ + p.query_ptr = reinterpret_cast(${qkv}->data); + p.key_ptr = reinterpret_cast(${qkv}->data) + p.head_dim * p.num_heads; + p.value_ptr = reinterpret_cast(${qkv}->data) + p.head_dim * p.num_heads * 2; + CHECK(${qkv}->ndim == 3); // B, S, NH + NH + NH' + + // stride for N + p.q_strideH = p.head_dim; // H + p.k_strideH = p.head_dim; // H + p.v_strideH = p.head_dim_value; // H' + + // stride for S + 
p.q_strideM = p.k_strideM = p.v_strideM = + p.q_strideH * p.num_heads + + p.k_strideH * p.num_heads + + p.v_strideH * p.num_heads; // H * N + H * N + H * N' + + // stride for B + p.q_strideB = p.k_strideB = p.v_strideB = + p.q_strideM * p.num_queries; // (H * N + H * N + H * N') * S +""", + } + + template = """ + using T = ${data_type}; + + using Attention = + AttentionKernel; + + typename Attention::Params p; + p.logsumexp_ptr = nullptr; + p.output_ptr = reinterpret_cast(out0->data); + + p.output_accum_ptr = nullptr; + uint64_t accumulator_buf_size = ${output_size} * sizeof(Attention::output_accum_t); + bool accumulator_buf_allocated = false; + if (Attention::kNeedsOutputAccumulatorBuffer) { + if (accumulator_buf_size <= ${workspace}->shape[0]) { + p.output_accum_ptr = static_cast(${workspace}->data); + } else { + accumulator_buf_size = true; + cudaMalloc( + &p.output_accum_ptr, + accumulator_buf_size + ); + } + } + + p.num_heads = ${num_heads}; // N + p.num_batches = ${num_batches}; // B + p.head_dim = ${head_dim}; // H + p.head_dim_value = ${head_dim_value}; // H' + p.num_queries = ${num_queries}; // S + p.num_keys = ${num_keys}; // S' + p.scale = ${scale}; + p.custom_mask_type = ${custom_mask_type}; + + + p.o_strideM = p.head_dim_value * p.num_heads; // H' * N + CHECK(out0->ndim == 4); // B, S, N, H' + + ${qkv_template} + ${bias_template} + ${var_len_template} + + constexpr auto kernel_fn = attention_kernel_batched_impl; + int smem_bytes = sizeof(typename Attention::SharedStorage); + if (smem_bytes > 0xc000) { + static bool once = [&]() { + cudaFuncSetAttribute( + kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); + return true; + }(); + } + + CHECK(Attention::check_supported(p)); + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + kernel_fn<<>>(p); + + if (accumulator_buf_allocated) { + cudaFree(p.output_accum_ptr); + } +""" + + 
template = substitute_template( + template, + { + "qkv_template": qkv_template[attrs["qkv_layout"]], + "bias_template": bias_template if "bias" in attrs else "", + "var_len_template": var_len_template if "seqstart_q" in attrs else "", + }, + ) + + return substitute_template(template, attrs) + + +def instantiate_flash_attention_template(attrs): + """Return host code for flash attention.""" + + template = """ + int q_head_stride = ${head_dim}; + int k_head_stride = ${head_dim}; + int v_head_stride = ${head_dim}; + int o_head_stride = ${head_dim}; + int q_row_stride = q_head_stride * ${num_q_heads}; + int k_row_stride = k_head_stride * ${num_kv_heads}; + int v_row_stride = v_head_stride * ${num_kv_heads}; + int o_row_stride = o_head_stride * ${num_q_heads}; + int q_batch_stride = q_row_stride * ${num_queries}; + int k_batch_stride = k_row_stride * ${num_keys}; + int v_batch_stride = v_row_stride * ${num_keys}; + int o_batch_stride = o_row_stride * ${num_queries}; + + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + flash_attn::flash_attention_forward( + static_cast(${query}->data), + static_cast(${key}->data), + static_cast(${value}->data), + static_cast(out0->data), + ${num_batches}, + ${num_queries}, + ${num_keys}, + ${num_q_heads}, + ${num_kv_heads}, + ${head_dim}, + q_batch_stride, + k_batch_stride, + v_batch_stride, + o_batch_stride, + q_head_stride, + k_head_stride, + v_head_stride, + o_head_stride, + q_row_stride, + k_row_stride, + v_row_stride, + o_row_stride, + ${scale}, + ${is_causal}, + ${window_size_left}, + ${window_size_right}, + stream); + """ + + template_stacked = """ + int q_head_stride = ${head_dim}; + int k_head_stride = ${head_dim}; + int v_head_stride = ${head_dim}; + int o_head_stride = ${head_dim}; + int row_stride = q_head_stride * ${num_q_heads} + + k_head_stride * ${num_kv_heads} + + v_head_stride * ${num_kv_heads}; + int 
q_row_stride = row_stride; + int k_row_stride = row_stride; + int v_row_stride = row_stride; + int o_row_stride = o_head_stride * ${num_q_heads}; + + int q_batch_stride = q_row_stride * ${num_queries}; + int k_batch_stride = k_row_stride * ${num_keys}; + int v_batch_stride = v_row_stride * ${num_keys}; + int o_batch_stride = o_row_stride * ${num_queries}; + + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + flash_attn::flash_attention_forward( + static_cast(${qkv}->data), + static_cast(${qkv}->data) + ${head_dim} * ${num_q_heads}, + static_cast(${qkv}->data) + ${head_dim} * (${num_q_heads} + ${num_kv_heads}), + static_cast(out0->data), + ${num_batches}, + ${num_queries}, + ${num_keys}, + ${num_q_heads}, + ${num_kv_heads}, + ${head_dim}, + q_batch_stride, + k_batch_stride, + v_batch_stride, + o_batch_stride, + q_head_stride, + k_head_stride, + v_head_stride, + o_head_stride, + q_row_stride, + k_row_stride, + v_row_stride, + o_row_stride, + ${scale}, + ${is_causal}, + ${window_size_left}, + ${window_size_right}, + stream); + """ + + if "qkv" in attrs: + return substitute_template(template_stacked, attrs) + + return substitute_template(template, attrs) + + +def instantiate_flash_attention_var_len_template(attrs): + """Return host code for flash attention with variable sequence lengths.""" + + template = """ + int _max_seqlen_q = ((int32_t*)${max_seqlen_q}->data)[0]; + int _max_seqlen_k = ((int32_t*)${max_seqlen_k}->data)[0]; + + int batch_size = ${seqstart_q}->shape[0] - 1; + + int q_head_stride = ${head_dim}; + int k_head_stride = ${head_dim}; + int v_head_stride = ${head_dim}; + int o_head_stride = ${head_dim}; + int q_row_stride = q_head_stride * ${num_q_heads}; + int k_row_stride = k_head_stride * ${num_kv_heads}; + int v_row_stride = v_head_stride * ${num_kv_heads}; + int o_row_stride = o_head_stride * ${num_q_heads}; + + auto func = 
tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + flash_attn::flash_attention_var_len_forward( + static_cast(${query}->data), + static_cast(${key}->data), + static_cast(${value}->data), + static_cast(${seqstart_q}->data), + static_cast(${seqstart_k}->data), + static_cast(out0->data), + batch_size, + _max_seqlen_q, + _max_seqlen_k, + ${num_q_heads}, + ${num_kv_heads}, + ${head_dim}, + q_head_stride, + k_head_stride, + v_head_stride, + o_head_stride, + q_row_stride, + k_row_stride, + v_row_stride, + o_row_stride, + ${scale}, + ${is_causal}, + // For SWA, is_causal must be false. + ${is_causal} ? _max_seqlen_k : ${window_size_left}, + ${window_size_right}, + stream); + """ + + return substitute_template(template, attrs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/build.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/build.py new file mode 100644 index 0000000000000000000000000000000000000000..1c0a30c62d9111cd58beef2942c3295df363dee3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/build.py @@ -0,0 +1,1164 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name, dangerous-default-value, arguments-differ +"""Driver for partitioning and building a Relay module for CUTLASS offload.""" +import itertools +import logging +import multiprocessing +import operator +import os +from functools import reduce +from typing import Optional, Sequence + +import tvm +from tvm import relax, relay, runtime +from tvm._ffi.registry import register_func +from tvm.contrib.nvcc import get_cuda_version +from tvm.topi.utils import get_const_tuple + +from .gen_conv2d import CutlassConv2DProfiler +from .gen_gemm import CutlassGemmProfiler +from .library import ConvKind, LayoutType + +logger = logging.getLogger("cutlass") + + +def has_cutlass(): + """Returns true if the CUTLASS custom codegen is available""" + return tvm.get_global_func("relay.ext.cutlass.create_c_source_module", True) is not None + + +def _get_cutlass_path(): + invalid_paths = [] + for rel in ["../../../../", "../../../", "../../"]: + tvm_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), rel) + cutlass_path = os.path.join(tvm_root, "3rdparty/cutlass") + if os.path.exists(cutlass_path): + return cutlass_path + invalid_paths.append(cutlass_path) + raise AssertionError(f"The CUTLASS root directory not found in: {invalid_paths}") + + +def _get_cutlass_compile_options(sm, threads, use_fast_math=False): + cutlass_root = _get_cutlass_path() + cutlass_include = os.path.join(cutlass_root, "include") + cutlass_util_include = os.path.join(cutlass_root, "tools/util/include") + cutlass_attention_include = os.path.join(cutlass_root, "examples/41_fused_multi_head_attention") + cutlass_fpA_intB_gemm_include = os.path.join(cutlass_root, "../cutlass_fpA_intB_gemm") + flash_attn_include = os.path.join(cutlass_root, "../libflash_attn/include") + + kwargs = {} + kwargs["cc"] = "nvcc" + kwargs["options"] = [ + "-c", + "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1", + 
f"-gencode=arch=compute_{sm},code=[sm_{sm},compute_{sm}]", + "-DNDEBUG", + "-Xcompiler=-fPIC", + "-Xcompiler=-Wconversion", + "-Xcompiler=-fno-strict-aliasing", + "-Xcompiler=-fvisibility=hidden", + "-O3", + "-std=c++17", + f"-I{cutlass_include}", + f"-I{cutlass_util_include}", + f"-I{cutlass_attention_include}", + f"-I{cutlass_fpA_intB_gemm_include}", + f"-I{flash_attn_include}", + ] + if use_fast_math: + kwargs["options"].append("-DCUTLASS_USE_TANH_FOR_SIGMOID") + cuda_ver = get_cuda_version() + if cuda_ver >= (11, 2): + ncpu = multiprocessing.cpu_count() if threads < 0 else threads + kwargs["options"].append(f"-t {ncpu}") + return kwargs + + +class OpAnnotator(tvm.relay.ExprVisitor): + """Annotates partitioned functions with shape and dtype information.""" + + def __init__(self): + super().__init__() + self.signature = {} + + def visit_call(self, call): + op = call.op + if isinstance(op, relay.Function) and "Composite" in op.attrs: + self.signature["op_type"] = op.attrs["Composite"] + for i, arg in enumerate(op.params): + self.signature[f"arg{i}_shape"] = arg.checked_type.shape + self.signature[f"arg{i}_dtype"] = arg.checked_type.dtype + self.signature["ret_shape"] = op.ret_type.shape + self.signature["ret_dtype"] = op.ret_type.dtype + self.visit(op.body) + + elif isinstance(op, tvm.ir.Op) and op.name in [ + "nn.conv2d", + "nn.conv2d_transpose", + "nn.conv2d_backward_weight", + ]: + self.op_attrs = call.attrs + + for arg in call.args: + self.visit(arg) + + +def select_gemm_kernel( + cutlass_profiler, + op_type, + MM, + KK, + NN, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + batched, + find_first_valid, + use_multiprocessing, +): + """Run CUTLASS profiler to select the best kernel, or return the default one for dynamic + workloads.""" + if any(isinstance(s, tvm.tir.Any) for s in [MM, KK, NN]): + out = cutlass_profiler.get_default( + op_type, out_dtype, arg0_dtype, arg1_dtype, use_3xtf32, batched=batched + ) + name, cutlass_op_def = out["name"], 
out["opdef"] + logger.info("Picked the default kernel %s", name) + else: + name, cutlass_op_def, _ = cutlass_profiler.profile( + op_type, + MM, + NN, + KK, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + batched=batched, + find_first_valid=find_first_valid, + use_multiprocessing=use_multiprocessing, + ) + if not find_first_valid: + logger.info("The best kernel is %s", name) + else: + logger.info("Picked the first kernel found %s", name) + + return name, cutlass_op_def + + +def handle_batch_matmul( + cutlass_profiler, + op_type, + arg0_shape, + arg1_shape, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + find_first_valid, + use_multiprocessing, +): + """Profile and select a kernel for batch_matmul op workload.""" + MM = arg0_shape[1] + KK = arg0_shape[2] + NN = arg1_shape[1] + + name, cutlass_op_def = select_gemm_kernel( + cutlass_profiler, + op_type, + MM, + KK, + NN, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + True, + find_first_valid, + use_multiprocessing, + ) + + return { + "batch": arg0_shape[0], + "batch_stride_A": arg0_shape[1] * arg0_shape[2], + "batch_stride_B": arg1_shape[1] * arg1_shape[2], + "batch_stride_C": arg0_shape[1] * arg1_shape[1], + "cutlass_op_def": cutlass_op_def, + "cutlass_op_name": name, + "lda": "K", + "ldb": "K", + "ldc": "N", + } + + +def handle_dense( + cutlass_profiler, + op_type, + arg0_shape, + arg1_shape, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + find_first_valid, + use_multiprocessing, +): + """Profile and select a kernel for dense op workload.""" + MM = arg0_shape[0] + KK = arg0_shape[1] + NN = arg1_shape[0] + + name, cutlass_op_def = select_gemm_kernel( + cutlass_profiler, + op_type, + MM, + KK, + NN, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + False, + find_first_valid, + use_multiprocessing, + ) + + assert "tn_align" in name, "Only supports (row_major, col_major) input layout for now." 
+ + return { + "cutlass_op_def": cutlass_op_def, + "cutlass_op_name": name, + "lda": "K", + "ldb": "K", + "ldc": "N", + } + + +def handle_conv2d( + cutlass_profiler, + op_type, + d_shape, + w_shape, + padding, + strides, + dilation, + out_dtype, + data_dtype, + weight_dtype, + use_3xtf32, + split_k_slices, + profile_all_alignments, + find_first_valid, + use_multiprocessing, +): + """Profile and select a kernel for conv2d op workload.""" + if "conv2d_transpose" in op_type: + conv_kind = ConvKind.Dgrad + elif "backward_weight" in op_type: + conv_kind = ConvKind.Wgrad + else: + conv_kind = ConvKind.Fprop + + if any(isinstance(s, tvm.tir.Any) for s in d_shape): + out = cutlass_profiler.get_default( + op_type, out_dtype, data_dtype, weight_dtype, use_3xtf32, conv_kind, strides + ) + name, cutlass_op_def = out["name"], out["opdef"] + logger.info("Picked the default kernel %s", name) + else: + name, cutlass_op_def, _ = cutlass_profiler.profile( + op_type, + d_shape, + w_shape, + padding, + strides, + dilation, + out_dtype, + data_dtype, + weight_dtype, + use_3xtf32, + conv_kind, + split_k_slices, + profile_all_alignments, + find_first_valid=find_first_valid, + use_multiprocessing=use_multiprocessing, + ) + if not find_first_valid: + logger.info("The best kernel is %s", name) + else: + logger.info("Picked the first kernel found %s", name) + + return {"cutlass_op_def": cutlass_op_def, "cutlass_op_name": name} + + +def num_cutlass_partitions(mod): + return sum([(1 if "cutlass" in var.name_hint else 0) for var in mod.get_global_vars()]) + + +def tune_cutlass_kernels( + mod, + sm, + use_3xtf32=True, + split_k_slices=[1], + profile_all_alignments=False, + find_first_valid=False, + use_multiprocessing=False, + tmp_dir="./tmp", +): + """Given a module partitioned for CUTLASS offloading, profile each workload to select which + kernels to emit. + + Parameters + ---------- + mod : IRModule + The Relay module with cutlass partitions. 
+ + sm : int + An integer specifying the compute capability. For example, 75 for Turing and + 80 or 86 for Ampere. + + use_3xtf32 : bool + Wheter or not use slower but very accurate (compared to tf32) 3xtf32 mode for + fp32 inputs on tensorcore. + + split_k_slices : list of int + Split factor candidates for split-K GEMM. If split-K > 1, the GEMM K-loop is computed in + parallel across split-K blocks, and a separate global reduction kernel is launched to + accumulate partial reductions. The profiler will pick the best split-k factor from the + given candidate list. Note that the larger split-K factor requires a larger workspace. + Currently, parallel split-k has been tested only for wgrad. For GEMM and other conv2d + kinds, split_k_slices is ignored. + + profile_all_alignments : bool + When True, profile all kernal variants with smaller alignments than the largest possible. + + find_first_valid : bool + Whether or not profile all candidate kernels, or stop profiling after + the first applicable kernel is found. + + use_multiprocessing : bool + Whether or not compile profiler executables for different kernels in parallel. + + tmp_dir : string, optional + A temporary directory where intermediate compiled artifacts will be stored. + + Returns + ------- + mod : IRModule + The updated module annotated with cutlass profiling information. + + num_cutlass_partition : int + The number of partitioned functions created for CUTLASS. 
+ """ + gemm_profiler = CutlassGemmProfiler(sm, _get_cutlass_path(), tmp_dir) + conv2d_profiler = CutlassConv2DProfiler(sm, _get_cutlass_path(), tmp_dir) + num_cutlass_partition = 0 + for var in mod.get_global_vars(): + fun_name = var.name_hint + func = mod[fun_name] + if "cutlass" in fun_name: + num_cutlass_partition += 1 + new_func = tune_cutlass_function( + func, + use_3xtf32, + split_k_slices, + profile_all_alignments, + find_first_valid, + use_multiprocessing, + gemm_profiler, + conv2d_profiler, + ) + mod.update_func(var, new_func) + + return mod, num_cutlass_partition + + +def tune_cutlass_function( + func, + use_3xtf32, + split_k_slices, + profile_all_alignments, + find_first_valid, + use_multiprocessing, + gemm_profiler, + conv2d_profiler, +): + """Given a function intended to be offloaded to CUTLASS, profile each workload to select which + kernels to emit. + + Parameters + ---------- + func : IRModule + The Relay Function to tune for. + + use_3xtf32 : bool + Wheter or not use slower but very accurate (compared to tf32) 3xtf32 mode for + fp32 inputs on tensorcore. + + split_k_slices : list of int + Split factor candidates for split-K GEMM. If split-K > 1, the GEMM K-loop is computed in + parallel accross split-K blocks, and a seperate global reduction kernel is launched to + accumulate partial reductions. The profiler will pick the best split-k factor from the + given candidate list. Note that the larger split-K factor requires a larger workspace. + Currently, parallel split-k has been tested only for wgrad. For GEMM and other conv2d + kinds, split_k_slices is ignored. + + profile_all_alignments : bool + When True, profile all kernal variants with smaller alignments than the largest possible. + + find_first_valid : bool + Whether or not profile all candidate kernels, or stop profiling after + the first applicable kernel is found. + + use_multiprocessing : bool + Whether or not compile profiler executables for different kernels in parallel. 
+ + gemm_profiler : CutlassGemmProfiler + Profiler for dense operators. May cache results between tuned functions. + + conv2d_profiler : CutlassConv2DProfiler + Profiler for conv2d operators. May cach results between tuned functions. + + Returns + ------- + annot_func : Function + The input function with attributes capturing the best CUTLASS kernel found by tuning. + """ + annotator = OpAnnotator() + annotator.visit(func) + out_shape = annotator.signature["ret_shape"] + out_dtype = annotator.signature["ret_dtype"] + op_type = annotator.signature["op_type"] + + new_attrs = {"op_type": op_type} + new_attrs.update(annotator.signature) + new_attrs.update(func.attrs) + arg0_shape = new_attrs["arg0_shape"] + arg1_shape = new_attrs["arg1_shape"] + arg0_dtype = new_attrs["arg0_dtype"] + arg1_dtype = new_attrs["arg1_dtype"] + + if "conv2d" in op_type: + new_attrs["padding"] = annotator.op_attrs.padding + new_attrs["strides"] = annotator.op_attrs.strides + new_attrs["dilation"] = annotator.op_attrs.dilation + + if "conv2d_transpose" in op_type: + d_shape = out_shape + w_shape = arg1_shape + elif "conv2d_backward_weight" in op_type: + d_shape = arg1_shape + w_shape = out_shape + else: + d_shape = arg0_shape + w_shape = arg1_shape + + new_attrs.update( + handle_conv2d( + conv2d_profiler, + op_type, + d_shape, + w_shape, + annotator.op_attrs.padding, + annotator.op_attrs.strides, + annotator.op_attrs.dilation, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + split_k_slices, + profile_all_alignments, + find_first_valid, + use_multiprocessing, + ) + ) + elif "batch_matmul" in op_type: + new_attrs.update( + handle_batch_matmul( + gemm_profiler, + op_type, + arg0_shape, + arg1_shape, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + find_first_valid, + use_multiprocessing, + ) + ) + elif "dense" in op_type: + new_attrs.update( + handle_dense( + gemm_profiler, + op_type, + arg0_shape, + arg1_shape, + out_dtype, + arg0_dtype, + arg1_dtype, + use_3xtf32, + 
find_first_valid, + use_multiprocessing, + ) + ) + else: + raise ValueError(f"{op_type} unsupported composite") + + new_attrs = tvm.ir.make_node("DictAttrs", **new_attrs) + return relay.Function( + func.params, + func.body, + ret_type=func.ret_type, + type_params=func.type_params, + attrs=new_attrs, + ) + + +def _get_call_node(expr: relax.Expr, op_name: str) -> Optional[relax.Call]: + node = None + + def fvisit(e): + nonlocal node + if isinstance(e, relax.Call) and e.op.name == op_name: + node = e + + relax.analysis.post_order_visit(expr, fvisit) + return node + + +def _extract_relax_function_signature(f): + signature = {} + + for i, arg in enumerate(f.params): + sinfo = arg.struct_info + if isinstance(sinfo, relax.TensorStructInfo): + signature["arg%d_shape" % i] = get_const_tuple(sinfo.shape) + signature["arg%d_dtype" % i] = sinfo.dtype + elif isinstance(sinfo, relax.ShapeStructInfo): + signature["arg%d_shape" % i] = get_const_tuple(sinfo.values) + else: + raise NotImplementedError() + + ret_sinfo = f.ret_struct_info + if ret_sinfo.shape is not None: + signature["ret_shape"] = get_const_tuple(ret_sinfo.shape) + else: + signature["ret_shape"] = None + signature["ret_dtype"] = ret_sinfo.dtype + + return signature + + +def _extract_arg_idx(pattern_name, f): + extract_func = tvm.get_global_func("relax.contrib.extract_arg_idx") + arg_indices = extract_func(pattern_name, f) + return {k: int(v) for k, v in arg_indices.items()} + + +def is_shape_valid_for_cutlass_matmul( + lhs_shape: Sequence[tvm.ir.PrimExpr], + rhs_shape: Sequence[tvm.ir.PrimExpr], +) -> bool: + """ + Check whether the shape of inputs can be handled by CUTLASS GEMM. + + The stride-based batch matmul in CUTLASS cannot handle cases that some of + the batch dimensions need to be stretched while others don't. This means + it can only handle ND x ND whose batch dimensions match exactly on both side, + as well as ND x 2D and 2D x ND. 
For example, it cannot handle matmul with shape + (2, 1, 4, 8) x (2, 3, 8, 16), because the batch stride of lhs is not constant. + """ + if not isinstance(lhs_shape[-1], (tvm.tir.expr.IntImm, int)): + # Reduction axis must be constant + return False + + lhs_batches = reduce(operator.mul, lhs_shape[:-2], 1) + rhs_batches = reduce(operator.mul, rhs_shape[:-2], 1) + if lhs_batches == 1 or rhs_batches == 1: + # This could be regular matmul or batch matmul with shape ND x 2D or 2D x ND + return True + + analyzer = tvm.arith.Analyzer() + # If one side has less dimensions, use 1 to fill the gap + batch_dim_pairs = list( + itertools.zip_longest( + list(lhs_shape)[-3::-1], # Remove the last two dimensions and reverse + list(rhs_shape)[-3::-1], + fillvalue=1, + ) + ) + return all(analyzer.can_prove_equal(p[0], p[1]) for p in batch_dim_pairs) + + +@relax.expr_functor.mutator +class CutlassRelaxFunctionAnnotator(relax.PyExprMutator): + """A Relax function mutator that tunes and annotates CUTLASS composite functions + with shape, dtype and generated templates. 
+ """ + + def __init__( + self, + mod, + conv2d_profiler: CutlassConv2DProfiler, + gemm_profiler: CutlassGemmProfiler, + options, + ): + super().__init__(mod) + self.options = options + self.conv2d_profiler = conv2d_profiler + self.gemm_profiler = gemm_profiler + + def handle_conv2d(self, f, op_type): + """Tune and annotate a conv2d op.""" + signature = _extract_relax_function_signature(f) + arg_idx = _extract_arg_idx(op_type, f) + op_attrs = _get_call_node(f.body, "relax.nn.conv2d").attrs + + data_arg = f"arg{arg_idx['lhs']}" + weight_arg = f"arg{arg_idx['rhs']}" + + d_shape = signature[f"{data_arg}_shape"] + w_shape = signature[f"{weight_arg}_shape"] + out_shape = signature["ret_shape"] + data_dtype = signature[f"{data_arg}_dtype"] + weight_dtype = signature[f"{weight_arg}_dtype"] + out_dtype = signature["ret_dtype"] + padding = op_attrs["padding"] + strides = op_attrs["strides"] + dilation = op_attrs["dilation"] + conv_kind = ConvKind.Fprop + + use_3xtf32 = self.options.get("use_3xtf32", False) + profile_all_alignments = self.options.get("profile_all_alignments", False) + find_first_valid = self.options.get("find_first_valid", True) + use_multiprocessing = self.options.get("use_multiprocessing", True) + split_k_slices = self.options.get("split_k_slices", [1]) + + op_name, op_def, _ = self.conv2d_profiler.profile( + op_type, + d_shape, + w_shape, + padding, + strides, + dilation, + out_dtype, + data_dtype, + weight_dtype, + use_3xtf32, + conv_kind, + split_k_slices, + profile_all_alignments, + find_first_valid=find_first_valid, + use_multiprocessing=use_multiprocessing, + ) + + attrs = { + "op_type": op_type, + "data_arg_idx": arg_idx["lhs"], + "weight_arg_idx": arg_idx["rhs"], + "bias_arg_idx": arg_idx.get("bias"), + "residual_arg_idx": arg_idx.get("residual"), + "arg0_dtype": data_dtype, + "arg1_dtype": weight_dtype, + "ret_dtype": out_dtype, + "arg0_shape": d_shape, + "arg1_shape": w_shape, + "ret_shape": out_shape, + "strides": strides, + "padding": padding, 
+ "dilation": dilation, + "cutlass_op_name": op_name, + "cutlass_op_def": op_def, + } + + residual_arg = arg_idx.get("residual") + + if residual_arg: + residual_shape = signature[f"arg{residual_arg}_shape"] + attrs["residual_shape"] = residual_shape + elif "residual" in op_type: + attrs["residual_shape"] = d_shape + + return f.with_attrs(attrs) + + def handle_decode_matmul(self, f, op_type): + """Annotate a decode -> matmul op.""" + arg_idx = _extract_arg_idx(op_type, f) + signature = _extract_relax_function_signature(f) + lhs_arg = f"arg{arg_idx['lhs']}" + rhs_arg = f"arg{arg_idx['w_encoded']}" + lhs_shape = signature[f"{lhs_arg}_shape"] + rhs_shape = signature[f"{rhs_arg}_shape"] + ret_shape = signature["ret_shape"] + scale_arg = f"arg{arg_idx['scales']}" + scale_shape = signature[f"{scale_arg}_shape"] + N = ret_shape[-1] + + attrs = { + "op_type": op_type, + "lhs_arg_idx": arg_idx["lhs"], + "rhs_arg_idx": arg_idx["w_encoded"], + "scales_arg_idx": arg_idx["scales"], + "bias_arg_idx": arg_idx.get("bias"), + "activation": "identity", + } + # TODO(wuwei): find a better way to get group size + attrs["group_size"] = 64 if len(scale_shape) == 2 and scale_shape[0] != 1 else -1 + + attrs["batch_rank"] = len(lhs_shape[:-1]) + attrs["M"] = reduce(operator.mul, lhs_shape[:-1], 1) + + attrs["bias_stride"] = 0 + + if "bias" in arg_idx: + bias_shape = signature[f"arg{arg_idx['bias']}_shape"] + bias_shape_1d = reduce(operator.mul, bias_shape, 1) + if bias_shape_1d != bias_shape[-1]: + attrs["bias_stride"] = bias_shape[-1] + + if N == rhs_shape[1]: + attrs["weight_nbit"] = 8 + else: + assert N == rhs_shape[1] * 2 + attrs["weight_nbit"] = 4 + + if "residual" in op_type: + residual_pos = op_type.find("residual_") + postfix = op_type[residual_pos + len("residual_") :] + + if postfix.startswith("multiply"): + binary_op = "multiply" + else: + binary_op = "plus" + + if "relu" in postfix: + unary_op = "relu" + else: + unary_op = "identity" + + activation = "identity" + + for act in 
["relu", "silu", "gelu"]: + if act in op_type[op_type.find("matmul_") + len("matmul_") : residual_pos]: + activation = act + break + + attrs.update( + { + "unary_op": unary_op, + "binary_op": binary_op, + "activation": activation, + "residual_arg_idx": arg_idx["residual"], + } + ) + else: + for act in ["relu", "silu", "gelu"]: + if act in op_type: + attrs["activation"] = act + break + + return f.with_attrs(attrs) + + def handle_matmul(self, f, op_type): + """Tune and annotate a matmul op.""" + signature = _extract_relax_function_signature(f) + arg_idx = _extract_arg_idx(op_type, f) + + lhs_arg = f"arg{arg_idx['lhs']}" + rhs_arg = f"arg{arg_idx['rhs']}" + + lhs_shape = signature[f"{lhs_arg}_shape"] + rhs_shape = signature[f"{rhs_arg}_shape"] + out_shape = signature["ret_shape"] + lhs_dtype = signature[f"{lhs_arg}_dtype"] + rhs_dtype = signature[f"{rhs_arg}_dtype"] + out_dtype = signature["ret_dtype"] + + if not is_shape_valid_for_cutlass_matmul(lhs_shape, rhs_shape): + raise ValueError(f"Cannot handle the input shapes, lhs: {lhs_shape}, rhs: {rhs_shape}") + + MM = lhs_shape[-2] + KK = lhs_shape[-1] + if "transposed" in op_type: + NN = rhs_shape[-2] + ldb = "K" + layout_b = LayoutType.ColumnMajor + else: + NN = rhs_shape[-1] + ldb = "N" + layout_b = LayoutType.RowMajor + + lhs_batches = reduce(operator.mul, lhs_shape[:-2], 1) + rhs_batches = reduce(operator.mul, rhs_shape[:-2], 1) + if lhs_batches == 1 and rhs_batches == 1: + # Regular matmul + is_batched = False + batch_attrs = {} + else: + is_batched = True + batch_attrs = { + # If both lhs_batches and rhs_batches are greater than 1, + # they must be equal. This is checked by is_shape_valid_for_cutlass_matmul. 
+ "batch": lhs_batches if rhs_batches == 1 else rhs_batches, + "batch_stride_A": 0 if lhs_batches == 1 else MM * KK, + "batch_stride_B": 0 if rhs_batches == 1 else KK * NN, + "batch_stride_C": MM * NN, + } + + use_3xtf32 = self.options.get("use_3xtf32", False) + find_first_valid = self.options.get("find_first_valid", True) + use_multiprocessing = self.options.get("use_multiprocessing", True) + + op_name, op_def, _ = self.gemm_profiler.profile( + op_type, + MM, + NN, + KK, + out_dtype, + lhs_dtype, + rhs_dtype, + use_3xtf32, + batched=is_batched, + find_first_valid=find_first_valid, + use_multiprocessing=use_multiprocessing, + layout_b=layout_b, + ) + + return f.with_attrs( + { + "op_type": op_type, + "lhs_arg_idx": arg_idx["lhs"], + "rhs_arg_idx": arg_idx["rhs"], + "residual_arg_idx": arg_idx.get("residual"), + "bias_arg_idx": arg_idx.get("bias"), + "arg0_dtype": signature["arg0_dtype"], + "arg1_dtype": signature["arg1_dtype"], + "ret_dtype": out_dtype, + "arg0_shape": signature["arg0_shape"], + "arg1_shape": signature["arg1_shape"], + "ret_shape": out_shape, + "lda": "K", + "ldb": ldb, + "ldc": "N", + "cutlass_op_name": op_name, + "cutlass_op_def": op_def, + **batch_attrs, + } + ) + + def handle_attention(self, f, op_type): + """Annotate an attention op.""" + signature = _extract_relax_function_signature(f) + + if _get_call_node(f.body, "relax.nn.attention") is not None: + op_attrs = _get_call_node(f.body, "relax.nn.attention").attrs + elif _get_call_node(f.body, "relax.nn.attention_bias") is not None: + op_attrs = _get_call_node(f.body, "relax.nn.attention_bias").attrs + elif _get_call_node(f.body, "relax.nn.attention_var_len") is not None: + op_attrs = _get_call_node(f.body, "relax.nn.attention_var_len").attrs + else: + raise ValueError("Cannot find call node for attention") + arg = {} + + if "stacked_attention" in op_type: + arg["arg0_shape"] = signature["arg0_shape"] + arg["arg0_dtype"] = signature["arg0_dtype"] + arg["arg1_shape"] = q_shape = 
signature["arg1_shape"] + + if "arg3_shape" not in signature: + # arg0: qkv, arg1: shape, arg2: workspace + arg["arg2_shape"] = k_shape = signature["arg1_shape"] + arg["arg3_shape"] = v_shape = signature["arg1_shape"] + else: + # arg0: qkv, arg1: shape1, arg2: shape2, arg3: shape3, arg4: workspace + arg["arg2_shape"] = k_shape = signature["arg2_shape"] + arg["arg3_shape"] = v_shape = signature["arg3_shape"] + + if "arg5_dtype" in signature: + # arg0: qkv, arg1: shape1, arg2: shape2, arg3: shape3, arg4: bias, arg5: workspace + arg["bias_dtype"] = signature["arg4_dtype"] + if "arg5_shape" in signature: + arg["bias_shape"] = signature["arg4_shape"] + + qkv_layout = "qkv_stacked" + else: + # arg0: q, arg1: k, arg2: v, arg3: bias, arg4: workspace + arg["arg0_shape"] = q_shape = signature["arg0_shape"] + arg["arg1_shape"] = k_shape = signature["arg1_shape"] + arg["arg2_shape"] = v_shape = signature["arg2_shape"] + arg["arg0_dtype"] = signature["arg0_dtype"] + arg["arg1_dtype"] = signature["arg1_dtype"] + arg["arg2_dtype"] = signature["arg2_dtype"] + + if "arg4_dtype" in signature: + arg["bias_dtype"] = signature["arg3_dtype"] + if "arg4_shape" in signature: + arg["bias_shape"] = signature["arg3_shape"] + + qkv_layout = "default" + + out_shape = signature["ret_shape"] + out_dtype = signature["ret_dtype"] + num_batches, num_queries, num_q_heads, head_dim = q_shape + _, num_keys, num_kv_heads, _ = k_shape + _, _, _, head_dim_value = v_shape + scale = op_attrs.scale + + if op_attrs.causal_mask is None: + custom_mask_type = 0 + elif op_attrs.causal_mask == "TopLeft": + custom_mask_type = 1 + elif op_attrs.causal_mask == "BottomRight": + custom_mask_type = 2 + else: + raise NotImplementedError() + + attrs = { + "op_type": op_type, + "ret_dtype": out_dtype, + "ret_shape": out_shape, + "num_batches": num_batches, + "num_queries": num_queries, + "num_keys": num_keys, + "num_q_heads": num_q_heads, + "num_kv_heads": num_kv_heads, + "head_dim": head_dim, + "head_dim_value": 
head_dim_value, + "scale": scale, + "arch": self.options["sm"], + "qkv_layout": qkv_layout, + "custom_mask_type": custom_mask_type, + **arg, + } + + if "var_len" in op_type: + arg_idx = _extract_arg_idx(op_type, f) + for arg in ["seqstart_q", "seqstart_k", "max_seqlen_q", "max_seqlen_k"]: + if arg in arg_idx: + attrs[arg + "_idx"] = arg_idx[arg] + + if op_attrs.window_size: + attrs["window_size"] = op_attrs.window_size + + return f.with_attrs(attrs) + + def handle_norm(self, f, op_type): + """Annotate a layer or rms norm op.""" + signature = _extract_relax_function_signature(f) + attrs = {} + attrs["batch_rank"] = len(signature["arg0_shape"][:-1]) + attrs["M"] = reduce(operator.mul, signature["arg0_shape"][:-1], 1) + attrs["N"] = signature["arg0_shape"][-1] + dtype = signature["arg0_dtype"] + attrs["data_type"] = {"float32": "float", "float16": "cutlass::half_t"}[str(dtype)] + + if "rms" in op_type: + attrs["rms_eps"] = self.options.get("rms_eps", 1e-5) + else: + attrs["layer_norm_eps"] = self.options.get("layer_nrom_eps", 1e-5) + + return f.with_attrs(attrs) + + def visit_function_(self, f): + if "Composite" not in f.attrs: + body = super().visit_expr(f.body) + return relax.Function(f.params, body, f.ret_struct_info, f.is_pure, f.attrs, f.span) + + op_type = f.attrs["Composite"] + + if "conv2d" in op_type: + return self.handle_conv2d(f, op_type) + elif "decode" in op_type: + return self.handle_decode_matmul(f, op_type) + elif "matmul" in op_type: + return self.handle_matmul(f, op_type) + elif "attention" in op_type: + return self.handle_attention(f, op_type) + elif "layer_norm" in op_type or "rms_norm" in op_type: + return self.handle_norm(f, op_type) + + raise ValueError("Unsupported composite {}".format(op_type)) + + def visit_span(self, span): + return span + + +@register_func("contrib.cutlass.tune_relax_function") +def profile_relax_function(functions, options): + """Tune and annotate CUTLASS composite functions with shape, dtype and generated templates.""" + 
tmp_dir = options.get("tmp_dir", "./tmp") + sm = options.get("sm", 80) + conv2d_profiler = CutlassConv2DProfiler(sm, _get_cutlass_path(), tmp_dir) + gemm_profiler = CutlassGemmProfiler(sm, _get_cutlass_path(), tmp_dir) + + annotated_functions = [] + + for f in functions: + annotator = CutlassRelaxFunctionAnnotator( + tvm.IRModule.from_expr(f), conv2d_profiler, gemm_profiler, options + ) + annotated_functions.append(annotator.visit_expr(f)) + + return annotated_functions + + +@register_func("contrib.cutlass.compile") +def compile_cutlass_module(c_source_module, options): + """Compile all CUTLASS kernels in the given C-source module. + + Parameters + ---------- + c_source_module: runtime.Module + A C-source module containing CUTLASS kernels. + + options: dict + Compilation options. Currently recognizes + "sm": The target architecture (compute capability), for example 75 or 80 (default: 80) + "threads": The number of threads to use in NVCC parallel compilation (default: + use all logical cores) + "use_fast_math": Whether or not to use faster but approximate arithmetic in some + CUTLASS epilogues (default: False) + + Returns + ------- + rt_mod : runtime.Module + A runtime module where all cutlass kernels have been compiled. 
+ """ + tmp_dir = options.get("tmp_dir", "./tmp") + defaults = {"sm": 80, "threads": -1, "use_fast_math": False} + compile_config = {key: options.get(key, val) for key, val in defaults.items()} + + function_names = c_source_module.get_function("get_func_names")() + compile_options = _get_cutlass_compile_options(**compile_config) + lib_path = os.path.join(tmp_dir, "cutlass.o") + logger.info("Compiling generated CUTLASS code") + c_source_module.export_library(lib_path, workspace_dir=tmp_dir, **compile_options) + + # Recover static library + return tvm.runtime.load_static_library(lib_path, function_names) + + +@register_func("relay.ext.cutlass.compile_for_cutlass") +def compile_for_cutlass(mod, cutlass_target): + """Given an IRModule with at least one Compiler='cutlass' Relay function, return a + LibraryModule with all such functions compiled into their PackedFunc-compatible form. + - First runs CUTLASS tuning to decide on the best kernels, which itself requires the + repeated compilation and execution of CUDA code using nvcc. The results of this + is captured as annotation on each relevant function. Kernel performance is cached + overall all functions. + - Then generates a single CSourceModule containing C code implementing all the + Compiler='cutlass' Relay functions, accounting for the tuning done above. + - Then compiles that CSourceModule with the appropriate nvcc arguments to yield + a static .o library. An export_library step will be required on the final runtime + module to link that library into the overall .so library. 
+ See CompileForCutlass in src/relay/backend/contrib/cutlass/codegen.cc for where this + helper function is used to implement the RelayToTIR pass hook for CUTLASS.""" + + # Recover options from the current 'cutlass' Target + assert cutlass_target.kind.name == "cutlass" + tuning_config = { + key: cutlass_target.attrs.get(key) + for key in [ + "sm", + "use_3xtf32", + "split_k_slices", + "profile_all_alignments", + "find_first_valid", + "use_multiprocessing", + ] + } + compile_config = { + key: cutlass_target.attrs.get(key) for key in ["sm", "threads", "use_fast_math"] + } + tmp_dir = cutlass_target.attrs.get("tmp_dir") + compile_config["tmp_dir"] = tmp_dir + + # Tune + logger.info("Tuning for CUTLASS") + mod, _ = tune_cutlass_kernels(mod, tmp_dir=tmp_dir, **tuning_config) + + # Compile + logger.info("Creating CSource module for CUTLASS") + create_c_source_module = tvm._ffi.get_global_func("relay.ext.cutlass.create_c_source_module") + c_module = create_c_source_module(mod) + return compile_cutlass_module(c_module, compile_config) + + +def finalize_modules(lib, lib_path="compile.so", tmp_dir="./tmp"): + """Returns lib with any C source, LLVM and static library modules complied and linked in ready + for use by the graph or AOT executors. This method is not specific to CUTLASS, however it does + assume nvcc will be used for final compilation and linking. It is provided here for + convenience. + + Parameters + ---------- + lib : runtime.Module + The output from relay.build. + + lib_path : string + The path to a shared library which will be generated as the result of the build process. + + tmp_dir : string + A temporary directory where intermediate compiled artifacts will be stored. + + Returns + ------- + updated_lib : runtime.Module + The updated library with all compilation and linking completed. 
+ + """ + lib_path = os.path.join(tmp_dir, lib_path) + lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc") + return runtime.load_module(lib_path) + + +def finalize_modules_vm(vm_exec, lib_path="compile.so", vmcode_path="vmcode.ro", tmp_dir="./tmp"): + """Returns vm_exec with any C source, LLVM and static library modules compiled and linked in + ready for use by the VM executor. This method is not specific to CUTLASS, however it does + assume nvcc will be used for final compilation and linking. It is provided here for + convenience. + + Parameters + ---------- + vm_exec : vm.Executable + The output from relay.vm.compile containing compiled host code and kernels. + + lib_path : string + The path to a shared library which will be generated as the result of the build process. + + vmcode_path : string + The path where the VM bytecode will be serialized to as a side-effect. + + tmp_dir : string + A temporary directory where intermediate compiled artifacts will be stored. + + Returns + ------- + updated_vm_exec : vm.Executable + The updated VM executable with all compilation and linking completed. 
+ """ + code, lib = vm_exec.save() + lib_path = os.path.join(tmp_dir, lib_path) + vmcode_path = os.path.join(tmp_dir, vmcode_path) + lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc") + with open(vmcode_path, "wb") as fo: + fo.write(code) + lib = tvm.runtime.load_module(lib_path) + return tvm.runtime.vm.Executable.load_exec(code, lib) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/conv2d_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/conv2d_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..77f4449db232b290e53d7402478721c68172ebd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/conv2d_operation.py @@ -0,0 +1,563 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name, unused-wildcard-import, wildcard-import +"""Generator for CUTLASS Conv2D kernels.""" +from .library import * + + +class Conv2dOperation: + """Describes various attributes for instantiating Conv2d kernels.""" + + def __init__( + self, + conv_kind, + iterator_algorithm, + arch, + tile_description, + A, + B, + C, + element_epilogue, + stride_support, + epilogue_functor=EpilogueFunctor.LinearCombination, + swizzling_functor=SwizzlingFunctor.Identity1, + split_k_slices=1, + ): + self.operation_kind = OperationKind.Conv2d + self.arch = arch + self.tile_description = tile_description + self.conv_kind = conv_kind + self.A = A + self.B = B + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.iterator_algorithm = iterator_algorithm + self.stride_support = stride_support + self.swizzling_functor = swizzling_functor + self.split_k_slices = split_k_slices + + def accumulator_type(self): + return self.tile_description.math_instruction.element_accumulator + + def core_name(self): + """The basic operation kind is prefixed with a letter indicating the accumulation type.""" + intermediate_type = "" + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + if ( + self.tile_description.math_instruction.element_a != self.A.element + and self.tile_description.math_instruction.element_a != self.accumulator_type() + ): + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + else: + inst_shape = "" + + return "%s%s%s%s_%s" % ( + ShortDataTypeNames[self.accumulator_type()], + inst_shape, + intermediate_type, + ConvKindNames[self.conv_kind], + IteratorAlgorithmNames[self.iterator_algorithm], + ) + + def extended_name(self): + """Append data types if they differ from compute type.""" + if ( + self.C.element != 
self.tile_description.math_instruction.element_accumulator + and self.A.element != self.tile_description.math_instruction.element_accumulator + ): + extended_name = "${element_c}_${core_name}_${element_a}" + elif ( + self.C.element == self.tile_description.math_instruction.element_accumulator + and self.A.element != self.tile_description.math_instruction.element_accumulator + ): + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = substitute_template( + extended_name, + { + "element_a": DataTypeNames[self.A.element], + "element_c": DataTypeNames[self.C.element], + "core_name": self.core_name(), + }, + ) + + return extended_name + + def layout_name(self): + return f"{ShortLayoutTypeNames[self.A.layout]}" + + def procedural_name(self): + """ + The full procedural name indicates architecture, extended name, tile size, and layout. + """ + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + threadblock = "%dx%d_%dx%d" % ( + self.tile_description.threadblock_shape[0], + self.tile_description.threadblock_shape[1], + self.tile_description.threadblock_shape[2], + self.tile_description.stages, + ) + + if self.stride_support == StrideSupport.Unity: + configuration_name = ( + "cutlass_${opcode_class}_${extended_name}_${threadblock}" + "_${layout}_align${alignment}_unity_stride" + ) + else: + configuration_name = ( + "cutlass_${opcode_class}_${extended_name}_${threadblock}" + "_${layout}_align${alignment}" + ) + + if self.split_k_slices > 1: + configuration_name += f"_splitk{self.split_k_slices}" + + return substitute_template( + configuration_name, + { + "opcode_class": opcode_class_name, + "extended_name": self.extended_name(), + "threadblock": threadblock, + "layout": self.layout_name(), + "alignment": f"{self.A.alignment}", + }, + ) + + +class EmitConv2dInstance: + """Responsible for emitting a CUTLASS template definition.""" + + def __init__(self): + self.epilogue_default = """ 
+ ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >""" + + self.epilogue_no_beta_scaling = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue}, + cutlass::epilogue::thread::ScaleType::NoBetaScaling + >""" + + self.epilogue_residual_block = """ + ${epilogue_functor}< + ${element_c}, + ${element_accumulator}, + ${element_epilogue}, + ${element_c}, + ${epilogue_vector_length}, + ${activation}, + ${binary_op}, + ${unary_op} + >""" + + self.epilogue_wgrad = """ + ${epilogue_functor}< + ${element_c}, + 4, + float, + float + >""" + + self.template = """ + // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" + using ${operation_name} = + typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}${conv_kernel_postfix}< + ${element_a}, + ${layout_a}, + ${element_b}, + ${layout_b}, + ${element_c}, + ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue}, + ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, + ${stages}, + ${math_operator}, + ${iterator_algorithm}, + ${stride_support}, + ${align_a}, + ${align_b} + >::Kernel; + + ${reduction} +""" + + self.reduction_template = """ +using EpilogueOutputOp = ${epilogue}; +using ReductionOp = cutlass::reduction::thread::ReduceAdd< + ${element_accumulator}, + ${element_accumulator}, + EpilogueOutputOp::kCount + >; + +using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, + EpilogueOutputOp, + ReductionOp + >; + +using ReductionDevice = 
cutlass::reduction::device::ReduceSplitK; +using ReductionStrideIndex = typename ReductionDevice::StrideIndex; +""" + + def emit( + self, operation, no_beta_scaling=False, residual_block_info=False, emit_reduction=False + ): + """Instantiate a Conv2d kernel from given `operation`.""" + warp_shape = [ + int( + operation.tile_description.threadblock_shape[idx] + / operation.tile_description.warp_count[idx] + ) + for idx in range(3) + ] + + epilogue_vector_length = int( + min(operation.C.alignment * DataTypeSize[operation.C.element], 128) + / DataTypeSize[operation.C.element] + ) + + element_c = operation.C.element + use_split_k_wgrad = operation.conv_kind == ConvKind.Wgrad and operation.split_k_slices > 1 + # Gemm output always fp32 in wgrad with split k + element_c_gemm = DataType.f32 if use_split_k_wgrad else element_c + + if emit_reduction: + epilogue_reduction = substitute_template( + self.epilogue_wgrad, + { + "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], + "element_c": DataTypeTag[element_c], + }, + ) + reduction = substitute_template( + self.reduction_template, + { + "epilogue": epilogue_reduction, + "operation_name": operation.procedural_name(), + "element_accumulator": DataTypeTag[operation.accumulator_type()], + }, + ) + gemm_template = substitute_template(self.template, {"reduction": reduction}) + else: + gemm_template = substitute_template(self.template, {"reduction": ""}) + + values = { + "operation_name": operation.procedural_name(), + "conv_kind": ConvKindTag[operation.conv_kind], + "conv_kind_name": ConvKindNames[operation.conv_kind].capitalize(), + "element_a": DataTypeTag[operation.A.element], + "layout_a": LayoutTag[operation.A.layout], + "element_b": DataTypeTag[operation.B.element], + "layout_b": LayoutTag[operation.B.layout], + "element_c": DataTypeTag[element_c_gemm], + "layout_c": LayoutTag[operation.C.layout], + "element_accumulator": DataTypeTag[operation.accumulator_type()], + "opcode_class": OpcodeClassTag[ + 
operation.tile_description.math_instruction.opcode_class + ], + "arch": f"cutlass::arch::Sm{operation.arch}", + "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), + "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), + "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str( + operation.tile_description.math_instruction.instruction_shape[0] + ), + "instruction_shape_n": str( + operation.tile_description.math_instruction.instruction_shape[1] + ), + "instruction_shape_k": str( + operation.tile_description.math_instruction.instruction_shape[2] + ), + "epilogue_vector_length": str(epilogue_vector_length), + "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], + "element_epilogue": str(DataTypeTag[operation.element_epilogue]), + "swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor], + "stages": str(operation.tile_description.stages), + "iterator_algorithm": IteratorAlgorithmTag[operation.iterator_algorithm], + "iterator_algorithm_name": IteratorAlgorithmNames[ + operation.iterator_algorithm + ].capitalize(), + "stride_support": StrideSupportTag[operation.stride_support], + "math_operator": MathOperationTag[ + operation.tile_description.math_instruction.math_operation + ], + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "conv_kernel_postfix": "", + } + + if use_split_k_wgrad: + # Even if the output is fp16, gemm output is always fp32 for split k wgrad. 
+ epilogue_gemm = substitute_template( + self.epilogue_wgrad, + { + "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], + "element_c": "float", + }, + ) + template = substitute_template(gemm_template, {"epilogue": epilogue_gemm}) + elif residual_block_info: + template = substitute_template( + gemm_template, {"epilogue": self.epilogue_residual_block} + ) + values.update( + { + "unary_op": residual_block_info["unary_op"], + "binary_op": residual_block_info["binary_op"], + "activation": residual_block_info["activation"], + "conv_kernel_postfix": "WithBroadcast", + } + ) + elif no_beta_scaling: + template = substitute_template( + gemm_template, {"epilogue": self.epilogue_no_beta_scaling} + ) + else: + template = substitute_template(gemm_template, {"epilogue": self.epilogue_default}) + + return substitute_template(template, values) + + +def instantiate_conv2d_template(attrs): + """Return CUTLASS host code for conv2d based on a template and the provided attribute map.""" + template = """ + ${cutlass_op_def} + + using Conv2d = cutlass::conv::device::ImplicitGemmConvolution<${cutlass_op_name}>; + using ElementInputA = Conv2d::ElementA; + using ElementInputB = Conv2d::ElementB; + using ElementComputeEpilogue = Conv2d::ElementAccumulator; + int N = ${N}; + int H = ${H}; + int W = ${W}; + int C = ${C}; + int K = ${K}; + int R = ${R}; + int S = ${S}; + int P = ${P}; + int Q = ${Q}; + int pad_h = ${pad_h}; + int pad_w = ${pad_w}; + int stride_h = ${stride_h}; + int stride_w = ${stride_w}; + int dilation_h = ${dilation_h}; + int dilation_w = ${dilation_w}; + int split_k_slices = ${split_k_slices}; + cutlass::conv::Conv2dProblemSize problem_size(N, H, W, C, K, R, S, P, Q, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, cutlass::conv::Mode::kCrossCorrelation, split_k_slices); + const cutlass::conv::SplitKMode split_k_mode = cutlass::conv::SplitKMode::${split_k_mode}; + + void* ptr_a = (void*)(${data_arg}->data); + void* ptr_b = 
(void*)(${weight_arg}->data); + ${bias_decl} + ${residual_decl} + void* ptr_out = (void*)(out0->data); + + ElementComputeEpilogue alpha = ElementComputeEpilogue(1); + ElementComputeEpilogue beta = ElementComputeEpilogue(${beta}); + using cutlass::layout::TensorNHWC; + auto activation_shape = TensorNHWC::packed(cutlass::make_Coord(N, H, W, C)); + auto weight_shape = TensorNHWC::packed(cutlass::make_Coord(K, R, S, C)); + auto output_shape = TensorNHWC::packed(cutlass::make_Coord(N, P, Q, K)); + ${residual_shape_decl} + + TensorNHWC layout_A(${A_shape}); + TensorNHWC layout_B(${B_shape}); + TensorNHWC layout_C(${C_shape}); + TensorNHWC layout_D(${D_shape}); + + using ElementOutput = ${ElementOutput}; + cutlass::TensorRef tensor_c{static_cast(${tensor_c}), ${tensor_c_layout}}; + cutlass::TensorRef tensor_d{static_cast(ptr_out), layout_D}; + typename Conv2d::Arguments arguments{ + problem_size, + {static_cast(ptr_a), layout_A}, + {static_cast(ptr_b), layout_B}, + ${tensor_c_arg}, + ${tensor_d_arg}, + {${alpha_beta}}, + split_k_mode + ${additional_args} + }; + Conv2d conv2d_op; + size_t workspace_size = conv2d_op.get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + cutlass::Status status = conv2d_op.can_implement(arguments); + CHECK(status == cutlass::Status::kSuccess); + ${split_k_reset} + status = conv2d_op.initialize(arguments, workspace.get()); + CHECK(status == cutlass::Status::kSuccess); + ${split_k_update} + + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + status = conv2d_op(stream); + CHECK(status == cutlass::Status::kSuccess); + ${split_k_reduction} +""" + + split_k_reset = """ + arguments.ref_D.reset(reinterpret_cast(workspace.get()), layout_D); +""" + + split_k_update = """ + arguments.output_op = {ElementComputeEpilogue(1), ElementComputeEpilogue(0)}; + status = conv2d_op.update(arguments, 
workspace.get()); + CHECK(status == cutlass::Status::kSuccess); +""" + + split_k_reduction = """ + ReductionDevice reduction_op; + const static cutlass::conv::Operator kConvolutionalOperator = Conv2d::kConvolutionalOperator; + typename ReductionDevice::Arguments reduction_args( + cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(), + problem_size.split_k_slices, + cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size), + { + reinterpret_cast (workspace.get()), + ReductionStrideIndex(tensor_c.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) + }, + { + tensor_d.data(), + ReductionStrideIndex(tensor_d.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) + }, + { + tensor_c.data(), + ReductionStrideIndex(tensor_c.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) + }, + {alpha, beta} + ); + status = reduction_op.initialize(reduction_args, nullptr); + status = reduction_op(); +""" + op_type = attrs["op_type"] + has_bias = "bias" in op_type + use_split_k = "splitk" in attrs["cutlass_op_name"] + is_wgrad = "backward_weight" in op_type + is_dgrad = "conv2d_transpose" in op_type + has_residual_block = "residual" in op_type + no_bias_scaling = op_type not in [ + "cutlass.conv2d_bias_sigmoid", + "cutlass.conv2d_bias_silu", + "cutlass.conv2d_bias_hardswish", + ] + + aux_map = {} + + if (not has_bias or no_bias_scaling) and not has_residual_block: + aux_map["beta"] = 0 + else: + aux_map["beta"] = 1 + + if has_residual_block: + aux_map["bias_decl"] = "void* ptr_bias = (void*)(${bias_arg}->data);\n" + aux_map["residual_decl"] = "void* ptr_residual = (void*)(${residual_arg}->data);" + aux_map["tensor_c"] = "ptr_residual" + aux_map["tensor_c_layout"] = "layout_C" + elif has_bias: + aux_map["bias_decl"] = "void* ptr_c_bias = (void*)(${bias_arg}->data);\n" + aux_map["residual_decl"] = "" + aux_map["tensor_c"] = "ptr_c_bias" + aux_map["tensor_c_layout"] = "cutlass::layout::TensorNHWC::Stride(0)" + else: + 
aux_map["bias_decl"] = "" + aux_map["residual_decl"] = "" + aux_map["tensor_c"] = "ptr_out" + aux_map["tensor_c_layout"] = "layout_C" + + if has_bias and no_bias_scaling and not has_residual_block: + aux_map["alpha_beta"] = "alpha" + else: + aux_map["alpha_beta"] = "alpha, beta" + + if has_residual_block: + aux_map["additional_args"] = ", static_cast(ptr_bias), nullptr, 0, K" + else: + aux_map["additional_args"] = "" + + aux_map["residual_shape_decl"] = "" + + if is_wgrad: + aux_map["A_shape"] = "output_shape" + aux_map["B_shape"] = "activation_shape" + aux_map["C_shape"] = "weight_shape" + aux_map["D_shape"] = "weight_shape" + elif is_dgrad: + aux_map["A_shape"] = "output_shape" + aux_map["B_shape"] = "weight_shape" + aux_map["C_shape"] = "activation_shape" + aux_map["D_shape"] = "activation_shape" + else: + aux_map["A_shape"] = "activation_shape" + aux_map["B_shape"] = "weight_shape" + aux_map["D_shape"] = "output_shape" + + if has_residual_block: + res_shape = list(attrs.pop("residual_shape")) + shape_str = f"cutlass::make_Coord({res_shape[0]}, {res_shape[1]}, {res_shape[2]}, K)" + aux_map[ + "residual_shape_decl" + ] = f"auto residual_shape = TensorNHWC::packed({shape_str});" + aux_map["C_shape"] = "residual_shape" + + if res_shape == [int(attrs[c]) for c in ["N", "H", "W", "K"]]: + aux_map["tensor_c_layout"] = "layout_C" + else: + # bias-like residual input + aux_map["tensor_c_layout"] = "cutlass::layout::TensorNHWC::Stride(0)" + else: + aux_map["C_shape"] = "output_shape" + + if use_split_k: + aux_map["ElementOutput"] = "EpilogueOutputOp::ElementOutput" + aux_map["tensor_c_arg"] = "{nullptr, TensorNHWC()}" + aux_map["tensor_d_arg"] = "{nullptr, TensorNHWC()}" + aux_map["split_k_reset"] = split_k_reset + aux_map["split_k_update"] = split_k_update + aux_map["split_k_reduction"] = split_k_reduction + else: + aux_map["ElementOutput"] = "Conv2d::ElementC" + aux_map["tensor_c_arg"] = "tensor_c" + aux_map["tensor_d_arg"] = "tensor_d" + aux_map["split_k_reset"] = 
aux_map["split_k_update"] = aux_map["split_k_reduction"] = "" + + template = substitute_template(template, aux_map) + + return substitute_template(template, attrs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/conv2d_profiler.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/conv2d_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..5c55f3706a7c3016b0bb595343b25ef85ddc7a58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/conv2d_profiler.py @@ -0,0 +1,215 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=import-outside-toplevel, invalid-name +"""Instantiate a C++ source for profiling CUTLASS kernels.""" + +from .library import DataTypeTag + + +class Conv2dProfilerEmitter(object): + """Emit a C++ source for profiling CUTLASS kernels.""" + + def __init__(self): + from jinja2 import Template + + self.reduction = """ + ReductionDevice reduction_op; + static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemm::kConvolutionalOperator; + typename ReductionDevice::Arguments reduction_args( + cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(), + problem_size.split_k_slices, + cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size), + { + reinterpret_cast (workspace.get()), + ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) + }, + { + tensor_d.device_data(), + ReductionStrideIndex(tensor_d.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) + }, + { + tensor_c.device_data(), + ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) + }, + {ElementComputeEpilogue(1), ElementComputeEpilogue(0)} + ); + + reduction_op.initialize(reduction_args, nullptr); + reduction_op(); +""" + + self.template = Template( + """ +#include +#include "cutlass/cutlass.h" +#include "cutlass/conv/kernel/default_conv2d_fprop.h" +#include "cutlass/conv/kernel/default_conv2d_wgrad.h" +#include "cutlass/conv/kernel/default_conv2d_dgrad.h" +#include "cutlass/conv/device/implicit_gemm_convolution.h" +#include "cutlass/util/command_line.h" +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/reference/host/tensor_fill.h" +#include "cutlass/reduction/device/reduce_split_k.h" +#include "cutlass/reduction/thread/reduction_operators.h" + +#define CUTLASS_CHECK(status) \ + { \ + cutlass::Status error = status; \ + if (error != cutlass::Status::kSuccess) { \ + std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) 
<< " at: " << __LINE__ \ + << std::endl; \ + exit(EXIT_FAILURE); \ + } \ + } + +{{OperatorDef}} +using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<{{OperatorName}}>; + +struct Options { + cutlass::Tensor4DCoord input_size; + cutlass::Tensor4DCoord filter_size; + cutlass::Tensor4DCoord padding; + cutlass::MatrixCoord conv_stride; + cutlass::MatrixCoord dilation; + + void parse(int argc, char const **args) { + cutlass::CommandLine cmd(argc, args); + cmd.get_cmd_line_argument("n", input_size.n()); + cmd.get_cmd_line_argument("h", input_size.h()); + cmd.get_cmd_line_argument("w", input_size.w()); + cmd.get_cmd_line_argument("c", input_size.c()); + cmd.get_cmd_line_argument("k", filter_size.n()); + cmd.get_cmd_line_argument("r", filter_size.h()); + cmd.get_cmd_line_argument("s", filter_size.w()); + int pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w; + cmd.get_cmd_line_argument("pad_h", pad_h); + cmd.get_cmd_line_argument("pad_w", pad_w); + cmd.get_cmd_line_argument("stride_h", stride_h); + cmd.get_cmd_line_argument("stride_w", stride_w); + cmd.get_cmd_line_argument("dilation_h", dilation_h); + cmd.get_cmd_line_argument("dilation_w", dilation_w); + filter_size.c() = input_size.c(); + padding = {pad_h, pad_h, pad_w, pad_w}; + conv_stride = {stride_h, stride_w}; + dilation = {dilation_h, dilation_w}; + } + + cutlass::Tensor4DCoord output_size() const { + auto dilated_h = (filter_size.h() - 1) * dilation.row() + 1; + auto dilated_w = (filter_size.w() - 1) * dilation.column() + 1; + auto h = (input_size.h() + padding.n() + padding.h() - dilated_h) / conv_stride.row() + 1; + auto w = (input_size.w() + padding.w() + padding.c() - dilated_w) / conv_stride.column() + 1; + return cutlass::Tensor4DCoord(input_size.n(), h, w, filter_size.n()); + } +}; + +double profile_convolution(Options const &options) { + using ElementOutput = {{ElementOutput}}; + using ElementInputA = typename ImplicitGemm::ElementA; + using ElementInputB = typename 
ImplicitGemm::ElementB; + + int split_k_slices = {{SplitK}}; + cutlass::conv::Conv2dProblemSize problem_size( + options.input_size, + options.filter_size, + options.padding, + options.conv_stride, + options.dilation, + options.output_size(), + cutlass::conv::Mode::kCrossCorrelation, + split_k_slices + ); + + auto conv_kind = ImplicitGemm::kConvolutionalOperator; + auto a_extent = implicit_gemm_tensor_a_extent(conv_kind, problem_size); + auto b_extent = implicit_gemm_tensor_b_extent(conv_kind, problem_size); + auto c_extent = implicit_gemm_tensor_c_extent(conv_kind, problem_size); + + using LayoutC = typename ImplicitGemm::LayoutC; + cutlass::HostTensor tensor_a(a_extent); + cutlass::HostTensor tensor_b(b_extent); + cutlass::HostTensor tensor_c(c_extent); + cutlass::HostTensor tensor_d(c_extent); + cutlass::HostTensor tensor_c_gemm(c_extent); + + using ElementComputeEpilogue = typename ImplicitGemm::ElementCompute; + + cutlass::conv::SplitKMode const split_k_mode = split_k_slices > 1 ? + cutlass::conv::SplitKMode::kParallel : cutlass::conv::SplitKMode::kSerial; + + typename ImplicitGemm::Arguments arguments{ + problem_size, + tensor_a.device_ref(), + tensor_b.device_ref(), + tensor_c_gemm.device_ref(), + tensor_c_gemm.device_ref(), + {ElementComputeEpilogue(1), ElementComputeEpilogue(0)}, + split_k_mode, + }; + + ImplicitGemm implicit_gemm_op; + size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + auto status = implicit_gemm_op.can_implement(arguments); + CUTLASS_CHECK(status); + + status = implicit_gemm_op.initialize(arguments, workspace.get()); + CUTLASS_CHECK(status); + status = implicit_gemm_op(); + CUTLASS_CHECK(status); + + cudaEvent_t events[2]; + for (auto & event : events) { + cudaEventCreate(&event); + } + cudaEventRecord(events[0]); + + for (int iteration = 0; iteration < 100; ++iteration) { + auto status = implicit_gemm_op(); + CUTLASS_CHECK(status); + {{Reduction}} + } 
+ + cudaEventRecord(events[1]); + cudaEventSynchronize(events[1]); + float runtime_ms = 0; + cudaEventElapsedTime(&runtime_ms, events[0], events[1]); + + for (auto event : events) { + (void)cudaEventDestroy(event); + } + return double(runtime_ms) / 100.0; +} + +int main(int argc, char const **args) { + Options options; + options.parse(argc, args); + std::cout << profile_convolution(options) << std::endl; + return 0; +} +""" + ) + + def emit(self, op_def, op_name, element_output, split_k_slices=1): + src = self.template.render( + OperatorDef=op_def, + OperatorName=op_name, + ElementOutput=DataTypeTag[element_output], + SplitK=split_k_slices, + Reduction=self.reduction if split_k_slices > 1 else "", + ) + return src diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gemm_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gemm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..2639a0359ae9c27ba3a0dc3538789b2fb52847cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gemm_operation.py @@ -0,0 +1,477 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name, unused-wildcard-import, wildcard-import, pointless-exception-statement +"""Generator for CUTLASS GEMM kernels.""" +from .library import * + + +class GemmOperation: + """Describes various attributes for instantiating GEMM kernels.""" + + def __init__( + self, + arch, + tile_description, + A, + B, + C, + element_epilogue, + epilogue_functor=EpilogueFunctor.LinearCombination, + swizzling_functor=SwizzlingFunctor.Identity8, + ): + self.operation_kind = OperationKind.Gemm + self.arch = arch + self.tile_description = tile_description + self.A = A + self.B = B + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + + def accumulator_type(self): + return self.tile_description.math_instruction.element_accumulator + + def short_math_name(self): + return ShortDataTypeNames[self.accumulator_type()] + + def core_name(self): + """The basic operation kind is prefixed with a letter indicating the accumulation type.""" + inst_shape = "" + intermediate_type = "" + + if ( + self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp + or self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp + ): + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + if ( + self.tile_description.math_instruction.element_a != self.A.element + and self.tile_description.math_instruction.element_a + != self.tile_description.math_instruction.element_accumulator + ): + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + return f"{self.short_math_name()}{inst_shape}{intermediate_type}gemm" + + def extended_name(self): + """Append data types if they differ from compute type.""" + if ( + self.C.element != self.tile_description.math_instruction.element_accumulator + and self.A.element != self.tile_description.math_instruction.element_accumulator + ): + extended_name = 
"${element_c}_${core_name}_${element_a}" + elif ( + self.C.element == self.tile_description.math_instruction.element_accumulator + and self.A.element != self.tile_description.math_instruction.element_accumulator + ): + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = substitute_template( + extended_name, + { + "element_a": DataTypeNames[self.A.element], + "element_c": DataTypeNames[self.C.element], + "core_name": self.core_name(), + }, + ) + + return extended_name + + def layout_name(self): + return f"{ShortLayoutTypeNames[self.A.layout]}{ShortLayoutTypeNames[self.B.layout]}" + + def procedural_name(self): + """The full procedural name indicates architecture, extended name, tile size, + and layout. + """ + threadblock = self.tile_description.procedural_name() + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + return substitute_template( + "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}", + { + "opcode_class": opcode_class_name, + "extended_name": self.extended_name(), + "threadblock": threadblock, + "layout": self.layout_name(), + "alignment": f"{self.A.alignment}", + }, + ) + + def leading_dim(self): + """lda, ldb, ldc, according to the leading dimension.""" + if self.A.layout == LayoutType.RowMajor: + lda = "K" + elif self.A.layout == LayoutType.ColumnMajor: + lda = "M" + else: + ValueError("The layout of A is not implemented.") + + if self.B.layout == LayoutType.RowMajor: + ldb = "N" + elif self.B.layout == LayoutType.ColumnMajor: + ldb = "K" + else: + ValueError("The layout of B is not implemented.") + + if self.C.layout == LayoutType.RowMajor: + ldc = "N" + elif self.C.layout == LayoutType.ColumnMajor: + ldc = "M" + else: + ValueError("The layout of B is not implemented.") + + return substitute_template( + "int lda = ${lda_val};\n\tint ldb = ${ldb_val};\n\tint ldc = ${ldc_val};\n", + {"lda_val": lda, "ldb_val": ldb, 
"ldc_val": ldc}, + ) + + +class EmitGemmInstance: + """Responsible for emitting a CUTLASS template definition.""" + + def __init__(self): + self.epilogue_default = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >""" + + self.epilogue_no_beta_scaling = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue}, + cutlass::epilogue::thread::ScaleType::NoBetaScaling + >""" + + self.epilogue_residual_block = """ + ${epilogue_functor}< + ${element_c}, + ${element_accumulator}, + ${element_epilogue}, + ${element_c}, + ${epilogue_vector_length}, + ${activation}, + ${binary_op}, + ${unary_op} + >""" + + self.gemm_template = """ + // Gemm operator ${operation_name} + using Operation_${operation_name} = cutlass::gemm::device::${kernel_name}< + ${element_a}, ${layout_a}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue}, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b} + >; +""" + + def emit(self, operation, no_beta_scaling=False, batched=False, residual_block_info=False): + """Instantiate a GEMM kernel from given `operation`.""" + warp_shape = [ + operation.tile_description.threadblock_shape[idx] + // operation.tile_description.warp_count[idx] + for idx in range(3) + ] + epilogue_vector_length = ( + min(operation.C.alignment * DataTypeSize[operation.C.element], 128) + // DataTypeSize[operation.C.element] + ) + values = { + "operation_name": operation.procedural_name(), + "element_a": DataTypeTag[operation.A.element], + "layout_a": LayoutTag[operation.A.layout], + "element_b": 
DataTypeTag[operation.B.element], + "layout_b": LayoutTag[operation.B.layout], + "element_c": DataTypeTag[operation.C.element], + "layout_c": LayoutTag[operation.C.layout], + "element_accumulator": DataTypeTag[operation.accumulator_type()], + "opcode_class": OpcodeClassTag[ + operation.tile_description.math_instruction.opcode_class + ], + "arch": f"cutlass::arch::Sm{operation.arch}", + "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), + "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), + "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str( + operation.tile_description.math_instruction.instruction_shape[0] + ), + "instruction_shape_n": str( + operation.tile_description.math_instruction.instruction_shape[1] + ), + "instruction_shape_k": str( + operation.tile_description.math_instruction.instruction_shape[2] + ), + "epilogue_vector_length": str(epilogue_vector_length), + "element_epilogue": str(DataTypeTag[operation.element_epilogue]), + "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], + "swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor], + "stages": str(operation.tile_description.stages), + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "math_operation": MathOperationTag[ + operation.tile_description.math_instruction.math_operation + ], + } + + values["kernel_name"] = "GemmBatched" if batched else "Gemm" + + if residual_block_info: + values["kernel_name"] = "GemmUniversalWithBroadcast" + template = substitute_template( + self.gemm_template, {"epilogue": self.epilogue_residual_block} + ) + values.update( + { + "unary_op": residual_block_info["unary_op"], + "binary_op": residual_block_info["binary_op"], + "activation": residual_block_info["activation"], + } + ) + elif 
no_beta_scaling: + template = substitute_template( + self.gemm_template, {"epilogue": self.epilogue_no_beta_scaling} + ) + else: + template = substitute_template(self.gemm_template, {"epilogue": self.epilogue_default}) + + return substitute_template(template, values) + + +def instantiate_gemm_template(attrs): + """Return CUTLASS host code for GEMM based on a template and the provided attribute map.""" + + argument_template_default = """ + typename ${kernel}::Arguments arguments{ + problem_size, + {static_cast(ptr_a), ${lda}}, ${batch_stride_A} + {static_cast(ptr_b), ${ldb}}, ${batch_stride_B} + {static_cast(${ptr_c}), ${c_stride}}, ${batch_stride_C} + {static_cast(ptr_out), ${ldc}}, ${batch_stride_D} + {${alpha_beta}}, + ${split_k_slices_or_batch} + }; + """ + + # See cutlass/gemm/kernel/gemm_with_fused_epilogue.h + argument_template_residual = """ + typename ${kernel}::Arguments arguments{ + cutlass::gemm::GemmUniversalMode::${gemm_universal_mode}, + problem_size, + ${split_k_slices_or_batch}, // batch_count + {${alpha_beta}}, + static_cast(ptr_a), + static_cast(ptr_b), + static_cast(ptr_residual), + static_cast(ptr_out), + static_cast(ptr_bias), + nullptr, // ptr_Tensor + ${batch_stride_A} + ${batch_stride_B} + ${batch_stride_C} + ${batch_stride_D} + 0, // batch_stride_Vector, + 0, // batch_stride_Tensor, + ${lda}, + ${ldb}, + ${ldc}, + ${ldc}, + 0, // ldv, the stride for bias + 0, // ldt + }; + """ + + template = """ + using ElementInputA = ${ElementInputA}; + using ElementInputB = ${ElementInputB}; + using ElementOutput = ${ElementOutput}; + using ElementComputeEpilogue = ${ElementOutput}; + + ${cutlass_op_def} + + using ${kernel} = Operation_${cutlass_op_name}; + int M = ${M}; + int N = ${N}; + int K = ${K}; + cutlass::gemm::GemmCoord problem_size(M, N, K); + ElementComputeEpilogue alpha = ElementComputeEpilogue(1); + ElementComputeEpilogue beta = ElementComputeEpilogue(${beta}); + void* ptr_a = (void*)(${lhs_arg}->data); + void* ptr_b = 
(void*)(${rhs_arg}->data); + ${bias_decl} + ${residual_decl} + void* ptr_out = (void*)(out0->data); + + ${argument} + size_t workspace_size = ${kernel}::get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + ${kernel} gemm_op; + cutlass::Status status = gemm_op.can_implement(arguments); + CHECK(status == cutlass::Status::kSuccess); + status = gemm_op.initialize(arguments, workspace.get()); + CHECK(status == cutlass::Status::kSuccess); + + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + status = gemm_op(stream); + CHECK(status == cutlass::Status::kSuccess); +""" + op_type = attrs["op_type"] + has_bias = "bias" in op_type + is_gelu = "gelu" in op_type + batched = "batch" in attrs + has_residual_block = "residual" in op_type + aux_map = {"kernel": "Gemm"} + + if has_bias: + aux_map.update( + { + "bias_decl": "void* ptr_bias = (void*)(${bias_arg}->data);\n", + "ptr_c": "ptr_bias", + "c_stride": ( + "(${bias_arg}->ndim == 1 ||" + " ${bias_arg}->shape[${bias_arg}->ndim - 2] == 1) ? 0 : " + attrs["ldc"] + ), + } + ) + else: + aux_map.update({"bias_decl": "", "ptr_c": "ptr_out", "c_stride": attrs["ldc"]}) + + if is_gelu or has_residual_block: + # GeLU epilogue does not compile with NoBetaScaling, so we explicitly specify the scale. 
+ aux_map["beta"] = 1 + else: + aux_map["beta"] = 0 + + if has_bias and not is_gelu and not has_residual_block: + aux_map["alpha_beta"] = "alpha" + else: + aux_map["alpha_beta"] = "alpha, beta" + + for key in ["batch_stride_A", "batch_stride_B", "batch_stride_C"]: + if not batched and not has_residual_block: + aux_map[key] = "" + else: + aux_map[key] = attrs.get(key, "0") + "," + + aux_map["batch_stride_D"] = aux_map["batch_stride_C"] + if has_bias and batched and not has_residual_block: + aux_map["batch_stride_C"] = "0," + + if batched: + attrs["split_k_slices_or_batch"] = attrs["batch"] + else: + attrs["split_k_slices_or_batch"] = 1 + + if has_residual_block: + template = substitute_template(template, {"argument": argument_template_residual}) + aux_map["residual_decl"] = "void* ptr_residual = (void*)(${residual_arg}->data);\n" + aux_map["gemm_universal_mode"] = "kBatched" if batched else "kGemm" + else: + template = substitute_template(template, {"argument": argument_template_default}) + aux_map["residual_decl"] = "" + + template = substitute_template(template, aux_map) + + return substitute_template(template, attrs) + + +def emit_fp16A_intB_matmul(attrs): + """Return CUTLASS host code for fp16 A and int4 or int8 B GEMM.""" + if attrs["group_size"] > 0: + attrs["quant_op"] = "cutlass::WeightOnlyQuantOp::FINEGRAINED_SCALE_ONLY" + else: + attrs["quant_op"] = "cutlass::WeightOnlyQuantOp::PER_COLUMN_SCALE_ONLY" + attrs["group_size"] = "k" + + attrs["template_common"] = substitute_template( + """ + using namespace fastertransformer; + constexpr auto QuantOp = ${quant_op}; + + int m = ${M}; + int n = ${B_arg}->shape[1] * ${float_per_int}; + int k = ${B_arg}->shape[0]; + + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + """, + attrs, + ) + + template = """ + ${template_common} + gemm_fp16_int_bias_act<${weight_dtype}, QuantOp>(static_cast(${A_arg}->data), + 
static_cast<${weight_dtype}*>(${B_arg}->data), + static_cast(${scales_arg}->data), + ${bias}, + static_cast(out0->data), + "${activation}", + m, n, k, ${group_size}, ${bias_stride}, nullptr, 0, stream); +""" + + template_residual = """ + ${template_common} + gemm_fp16_int_bias_act_residual<${weight_dtype}, QuantOp>(static_cast(${A_arg}->data), + static_cast<${weight_dtype}*>(${B_arg}->data), + static_cast(${scales_arg}->data), + ${bias}, + static_cast(${residual_arg}->data), + static_cast(out0->data), "${activation}", "${binary_op}", "${unary_op}", + m, n, k, ${group_size}, nullptr, 0, stream); +""" + + if "residual_arg" in attrs: + if "bias_arg" in attrs: + bias = "static_cast(${bias_arg}->data)" + else: + bias = "nullptr" + + template_residual = substitute_template(template_residual, {"bias": bias}) + return substitute_template(template_residual, attrs) + + if "bias_arg" in attrs: + template = substitute_template( + template, {"bias": "static_cast(${bias_arg}->data)"} + ) + else: + template = substitute_template(template, {"bias": "nullptr"}) + + return substitute_template(template, attrs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gemm_profiler.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gemm_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..e89e7defbfb7edbeae3bfdbb5a61172708b83995 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gemm_profiler.py @@ -0,0 +1,196 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=import-outside-toplevel, invalid-name +"""Instantiate a C++ source for profiling CUTLASS kernels.""" + + +class GemmProfilerEmitter(object): + """Emit a C++ source for profiling CUTLASS kernels.""" + + def __init__(self): + from jinja2 import Template + + self.template = Template( + """ +#include +#include +#include +#include + +#include "cuda_runtime.h" +#include "cutlass/gemm/device/gemm.h" + +#define CUTLASS_CHECK(status) \\ + { \\ + cutlass::Status error = status; \\ + if (error != cutlass::Status::kSuccess) { \\ + std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) << " at: " << __LINE__ \\ + << std::endl; \\ + exit(EXIT_FAILURE); \\ + } \\ + } + +#define CUDA_CHECK(status) \\ + { \\ + cudaError_t error = status; \\ + if (error != cudaSuccess) { \\ + std::cerr << "Got bad cuda status: " << cudaGetErrorString(error) \\ + << " at line: " << __LINE__ << std::endl; \\ + exit(EXIT_FAILURE); \\ + } \\ + } + +template +cudaError_t CutlassGemm( + int M, + int N, + int K, + DTypeC alpha, + DTypeA const *A, + int lda, + DTypeB const *B, + int ldb, + DTypeC beta, + DTypeC *C, + int ldc) { + using namespace std::chrono; + {{OperatorDef}} + Operation_{{OperatorName}} gemm_operator; + Operation_{{OperatorName}}::Arguments args({M, N, K}, + {A, lda}, + {B, ldb}, + {C, ldc}, + {C, ldc}, + {alpha, beta}); + cutlass::Status status = gemm_operator(args); + CUTLASS_CHECK(status) + + high_resolution_clock::time_point t1 = high_resolution_clock::now(); + for (int i = 0; i < 100; ++i) { + status = gemm_operator(args); + } + 
cudaDeviceSynchronize(); + high_resolution_clock::time_point t2 = high_resolution_clock::now(); + duration time_span = duration_cast>(t2 - t1); + std::cout << time_span.count() << std::endl; + return cudaSuccess; +} + + +template +cudaError_t AllocateMatrix(DType **matrix, int ldm, int rows, int columns, int seed = 0) { + cudaError_t result; + + size_t sizeof_matrix = sizeof(DType) * rows * columns; + + // Allocate device memory. + result = cudaMalloc(reinterpret_cast(matrix), sizeof_matrix); + + if (result != cudaSuccess) { + std::cerr << "Failed to allocate matrix: " + << cudaGetErrorString(result) << std::endl; + return result; + } + + // Clear the allocation. + result = cudaMemset(*matrix, 0, sizeof_matrix); + + if (result != cudaSuccess) { + std::cerr << "Failed to clear matrix device memory: " + << cudaGetErrorString(result) << std::endl; + return result; + } + + if (result != cudaSuccess) { + std::cerr << "Failed to initialize matrix: " + << cudaGetErrorString(result) << std::endl; + return result; + } + + return result; +} + +template +cudaError_t TestCutlassGemm(int M, int N, int K, DTypeC alpha, DTypeC beta) { + cudaError_t result; + + {{LeadingDim}} + // size_t sizeof_C = sizeof(DTypeC) * ldc * N; + DTypeA *A; + DTypeB *B; + DTypeC *C_cutlass; + result = AllocateMatrix(&A, lda, M, K, 0); + if (result != cudaSuccess) { + return result; + } + result = AllocateMatrix(&B, ldb, K, N, 17); + if (result != cudaSuccess) { + cudaFree(A); + return result; + } + result = AllocateMatrix(&C_cutlass, ldc, M, N, 101); + if (result != cudaSuccess) { + cudaFree(A); + cudaFree(B); + return result; + } + result = CutlassGemm(M, N, K, alpha, A, lda, B, ldb, + beta, C_cutlass, ldc); + if (result != cudaSuccess) { + std::cerr << "CUTLASS GEMM kernel failed: " + << cudaGetErrorString(result) << std::endl; + cudaFree(C_cutlass); + cudaFree(B); + cudaFree(A); + + return result; + } + cudaFree(C_cutlass); + cudaFree(B); + cudaFree(A); + return cudaSuccess; +} + +int main(int 
argc, const char *arg[]) { + int problem[3] = { 4096, 4096, 4096 }; + for (int i = 1; i < argc && i < 4; ++i) { + std::stringstream ss(arg[i]); + ss >> problem[i - 1]; + } + float scalars[2] = { 1, 0 }; + cudaError_t result = TestCutlassGemm< {{DTypeA}}, {{DTypeB}}, {{DTypeC}}>( + problem[0], // GEMM M dimension + problem[1], // GEMM N dimension + problem[2], // GEMM K dimension + static_cast<{{DTypeC}}>(scalars[0]), // alpha + static_cast<{{DTypeC}}>(scalars[1]) // beta + ); + return result == cudaSuccess ? 0 : -1; +} +""" + ) + + def emit(self, op_name, op_def, dtype_a, dtype_b, dtype_c, ld): + src = self.template.render( + OperatorName=op_name, + OperatorDef=op_def, + DTypeA=dtype_a, + DTypeB=dtype_b, + DTypeC=dtype_c, + LeadingDim=ld, + ) + return src diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gen_conv2d.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gen_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..3d14a427b1a349382af31ec0ea44d14f4f7e72e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/gen_conv2d.py @@ -0,0 +1,391 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
# See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, dangerous-default-value
"""Conv2d kernel generator and profiler for CUTLASS."""
import os
import pickle
from functools import partial
from .conv2d_operation import Conv2dOperation, EmitConv2dInstance
from .gen_gemm import CutlassGemmProfiler
from .conv2d_profiler import Conv2dProfilerEmitter
from .gen_tensor_op import ProfilerEngine, GENERATOR_FUNC_TABLE, EPILOGUE_MAP
from .library import (
    DataType,
    EpilogueFunctor,
    SwizzlingFunctor,
    TensorDescription,
    LayoutType,
    ConvKind,
    StrideSupport,
    IteratorAlgorithm,
)


def create_conv2d_operator_with_epilogue(
    conv_kind,
    stride_support,
    op_type,
    tile_description,
    data_type,
    alignment,
    alignment_epilogue,
    swizzling_functor,
    split_k_slices,
):
    """
    Instantiate a cutlass kernel from the given configuration,
    along with the epilogue functor.

    Returns a ``(procedural_name, kernel_definition_source)`` pair.
    """
    if "residual" in op_type:
        # Fused residual-block epilogue: op_type encodes activation, the binary op
        # that combines with the residual, and an optional trailing unary op.
        activation_map = {
            "cutlass.conv2d_bias_hardswish": "cutlass::epilogue::thread::HardSwish",
            "cutlass.conv2d_bias_silu": "cutlass::epilogue::thread::SiLu",
            "cutlass.conv2d_bias_sigmoid": "cutlass::epilogue::thread::Sigmoid",
            "cutlass.conv2d_bias_relu": "cutlass::epilogue::thread::ReLu",
            "cutlass.conv2d_bias": "cutlass::epilogue::thread::Identity",
        }
        prefix = op_type[: op_type.find("_residual")]
        activation = activation_map[prefix]
        binary_op = "cutlass::multiplies" if "residual_multiply" in op_type else "cutlass::plus"
        unary_op = (
            "cutlass::epilogue::thread::ReLu"
            if op_type.endswith("relu")
            else "cutlass::epilogue::thread::Identity"
        )
        residual_block_info = {
            "activation": activation,
            "binary_op": binary_op,
            "unary_op": unary_op,
        }
        epilogue = EpilogueFunctor.LinearCombinationResidualBlock
        no_beta_scaling = False
    else:
        residual_block_info = None
        epilogue, no_beta_scaling = EPILOGUE_MAP[op_type]

    element_a, element_b, element_c, element_epilogue = data_type

    A = TensorDescription(element_a, LayoutType.TensorNHWC, alignment)
    B = TensorDescription(element_b, LayoutType.TensorNHWC, alignment)
    C = TensorDescription(element_c, LayoutType.TensorNHWC, alignment_epilogue)

    op = Conv2dOperation(
        conv_kind,
        IteratorAlgorithm.Optimized,
        tile_description.minimum_compute_capability,
        tile_description,
        A,
        B,
        C,
        element_epilogue,
        stride_support,
        epilogue,
        swizzling_functor,
        split_k_slices,
    )

    name = op.procedural_name()
    opdef = EmitConv2dInstance().emit(
        op,
        no_beta_scaling=no_beta_scaling,
        residual_block_info=residual_block_info,
        # split-K needs an extra reduction kernel to combine partial sums.
        emit_reduction=split_k_slices > 1,
    )

    return name, opdef


def enumerate_conv2d_operators(
    conv_kind,
    stride_support,
    split_k_slices,
    alignment_c,
    tile_descriptions,
    data_type,
    alignment_constraints,
    swizzling_functor=SwizzlingFunctor.Identity4,
):
    """Exhaustively instantiate all kernels from a given configuration.

    Returns a list of dicts, one per candidate kernel, each holding the
    profiler source (``src``), the kernel ``name``, and the configuration
    needed to re-instantiate it later.
    """
    ret = []

    kernel_emitter = EmitConv2dInstance()
    profiler_emitter = Conv2dProfilerEmitter()

    element_a, element_b, element_c, element_epilogue = data_type

    # Strided dgrad requires a dedicated swizzling functor.
    if conv_kind == ConvKind.Dgrad and stride_support == StrideSupport.Strided:
        swizzling_functor = SwizzlingFunctor.StridedDgradIdentity1

    for split_k_slice in split_k_slices:
        for tile in tile_descriptions:
            for alignmentAB in alignment_constraints:
                for alignmentC in alignment_c:

                    A = TensorDescription(element_a, LayoutType.TensorNHWC, alignmentAB)
                    B = TensorDescription(element_b, LayoutType.TensorNHWC, alignmentAB)
                    C = TensorDescription(element_c, LayoutType.TensorNHWC, alignmentC)

                    # NOTE(review): this mutates the shared tile object in place, so the
                    # clamp persists across subsequent loop iterations — confirm intended.
                    if element_c == DataType.s32 and A.alignment == 1:
                        tile.threadblock_shape[0] = min(tile.threadblock_shape[0], 128)
                        tile.threadblock_shape[1] = min(tile.threadblock_shape[1], 128)

                    op = Conv2dOperation(
                        conv_kind,
                        IteratorAlgorithm.Optimized,
                        tile.minimum_compute_capability,
                        tile,
                        A,
                        B,
                        C,
                        element_epilogue,
                        stride_support,
                        EpilogueFunctor.LinearCombination,
                        swizzling_functor,
                        split_k_slice,
                    )

                    ret.append(
                        {
                            "src": profiler_emitter.emit(
                                kernel_emitter.emit(op, emit_reduction=split_k_slice > 1),
                                op.procedural_name(),
                                element_output=element_c,
                                split_k_slices=split_k_slice,
                            ),
                            "name": op.procedural_name(),
                            "tile_description": tile,
                            "alignment": alignmentAB,
                            "alignment_epilogue": alignmentC,
                            "data_type": data_type,
                            "swizzle_functor": swizzling_functor,
                            "split_k_slices": split_k_slice,
                        }
                    )

    return ret


class CutlassConv2DProfiler:
    """Profile all candidate kernels and select the best one."""

    def __init__(self, sm, cutlass_path, binary_path):
        self.gemm_profiler = CutlassGemmProfiler(sm, cutlass_path, binary_path)
        self.sm = sm
        assert sm in GENERATOR_FUNC_TABLE, f"sm{sm} not supported yet."
        self.engine = ProfilerEngine(sm, cutlass_path, binary_path)
        self.cache_path = os.path.join(binary_path, "cutlass_conv2d_cache.pickle")
        if os.path.exists(self.cache_path):
            # Use a context manager so the cache file handle is closed promptly
            # (the original `pickle.load(open(...))` leaked the handle).
            with open(self.cache_path, "rb") as cache_file:
                self.cache = pickle.load(cache_file)
        else:
            self.cache = {}

    def get_default(
        self,
        op_type,
        out_dtype,
        arg0_dtype,
        arg1_dtype,
        use_3xtf32,
        conv_kind=ConvKind.Fprop,
        stride=(1, 1),
    ):
        """Return the default kernel for the requested architecture.

        For now, the default kernel was picked arbitrarily (it reuses the GEMM
        profiler's default tile/alignment configuration).
        """
        gemm_profile_result = self.gemm_profiler.get_default(
            op_type, out_dtype, arg0_dtype, arg1_dtype, use_3xtf32
        )
        tile_description = gemm_profile_result["tile_description"]
        alignment = gemm_profile_result["alignment"]
        data_type = gemm_profile_result["data_type"]
        stride_support = StrideSupport.Strided if stride[0] > 1 else StrideSupport.Unity

        if conv_kind == ConvKind.Dgrad and stride_support == StrideSupport.Strided:
            swizzling_functor = SwizzlingFunctor.StridedDgradIdentity1
        else:
            swizzling_functor = SwizzlingFunctor.Identity4

        name, opdef = create_conv2d_operator_with_epilogue(
            conv_kind,
            stride_support,
            op_type,
            tile_description,
            data_type,
            alignment,
            alignment,
            swizzling_functor,
            split_k_slices=1,
        )
        return {"name": name, "opdef": opdef}

    def select_op(
        self,
        d_shape,
        w_shape,
        padding,
        stride,
        dilation,
        out_dtype,
        data_dtype,
        weight_dtype,
        use_3xtf32,
        conv_kind,
        stride_support,
        split_k_slices,
        profile_all_alignments=False,
        find_first_valid=False,
        use_multiprocessing=False,
    ):
        """
        Profile and select the best kernel from candidate kernels.
        See the documentation for the profile method below.
        """
        N, H, W, IC = d_shape
        OC, R, S, _ = w_shape

        # Full conv2d workload key used for memoization.
        workload = (
            N,
            H,
            W,
            IC,
            OC,
            R,
            S,
            padding[0],
            padding[1],
            stride[0],
            stride[1],
            dilation[0],
            dilation[1],
        )

        if workload in self.cache:
            return self.cache[workload]

        def alignments(dtype):
            """Vector-load alignment candidates for *dtype*, widest first."""
            if dtype in ["float16"]:
                return [8, 4, 2, 1]
            if dtype in ["float", "float32"]:
                return [4, 2, 1]
            raise ValueError(f"Unsupported data type: {dtype}")

        alignments_c = [align for align in alignments(out_dtype) if OC % align == 0]

        if not profile_all_alignments:
            # Keep only the widest valid output alignment.
            alignments_c = [alignments_c[0]]

        ops = GENERATOR_FUNC_TABLE[self.sm](
            out_dtype,
            data_dtype,
            weight_dtype,
            partial(
                enumerate_conv2d_operators,
                conv_kind,
                stride_support,
                split_k_slices,
                alignments_c,
            ),
            # Input-channel count determines the A/B alignment constraint.
            lambda align: IC % align == 0,
            use_3xtf32,
            profile_all_alignments,
            # Use fp32 accumulation for wgrad to align with cuDNN
            accumlator_dtype="float32" if conv_kind == ConvKind.Wgrad else out_dtype,
        )

        if not find_first_valid:
            self.engine.compile_all(ops, use_multiprocessing)

        args = (
            "--n=%d --h=%d --w=%d --c=%d --k=%d --r=%d --s=%d --pad_h=%d --pad_w=%d "
            "--stride_h=%d --stride_w=%d --dilation_h=%d --dilation_w=%d"
        ) % workload

        for op in ops:
            out = self.engine.evaluate(op, args.split(" "))
            op["runtime"] = out
            if out < float("inf") and find_first_valid:
                # First applicable kernel wins; cached in memory only.
                self.cache[workload] = op
                return op

        op = min(ops, key=lambda i: i["runtime"])
        self.cache[workload] = op
        with open(self.cache_path, "wb") as f:
            pickle.dump(self.cache, f)
        return op

    def profile(
        self,
        op_type,
        d_shape,
        w_shape,
        padding,
        stride,
        dilation,
        out_dtype,
        data_dtype,
        weight_dtype,
        use_3xtf32=True,
        conv_kind=ConvKind.Fprop,
        split_k_slices=[1],
        profile_all_alignments=False,
        find_first_valid=False,
        use_multiprocessing=False,
    ):
        """Profile and select the best kernel from candidate kernels.

        If find_first_valid is True, return immediately after the first applicable
        kernel is found.
        If use_multiprocessing is True, compile all profiler executables in parallel.

        Returns ``(kernel_name, kernel_definition, runtime)``.
        """
        # Dgrad requires Unity stride when stride == (1, 1)
        stride_support = (
            StrideSupport.Unity
            if stride[0] == 1 and stride[1] == 1 and conv_kind == ConvKind.Dgrad
            else StrideSupport.Strided
        )

        op = self.select_op(
            d_shape,
            w_shape,
            padding,
            stride,
            dilation,
            out_dtype,
            data_dtype,
            weight_dtype,
            use_3xtf32,
            conv_kind,
            stride_support,
            split_k_slices,
            profile_all_alignments,
            find_first_valid,
            use_multiprocessing,
        )

        name, opdef = create_conv2d_operator_with_epilogue(
            conv_kind,
            stride_support,
            op_type,
            op["tile_description"],
            op["data_type"],
            op["alignment"],
            op["alignment_epilogue"],
            op["swizzle_functor"],
            op["split_k_slices"],
        )

        return name, opdef, op["runtime"]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""GEMM kernel generator and profiler for CUTLASS."""
import os
import pickle
from functools import partial

from .gemm_operation import EmitGemmInstance, GemmOperation
from .gemm_profiler import GemmProfilerEmitter
from .gen_tensor_op import EPILOGUE_MAP, GENERATOR_FUNC_TABLE, ProfilerEngine
from .library import (
    DataType,
    DataTypeTag,
    EpilogueFunctor,
    LayoutType,
    SwizzlingFunctor,
    TensorDescription,
)


def create_gemm_operator_with_epilogue(
    op_type,
    tile_description,
    data_type,
    alignment,
    swizzling_functor,
    batched=False,
    layout_b=LayoutType.ColumnMajor,
):
    """
    Instantiate a cutlass kernel from the given configuration,
    along with the epilogue functor.

    Returns a ``(procedural_name, kernel_definition_source)`` pair.
    """
    element_a, element_b, element_c, element_epilogue = data_type

    A = TensorDescription(element_a, LayoutType.RowMajor, alignment)
    B = TensorDescription(element_b, layout_b, alignment)
    C = TensorDescription(element_c, LayoutType.RowMajor, alignment)

    if batched:
        # Batched GEMM must use the batched swizzling functor.
        swizzling_functor = SwizzlingFunctor.Batched

    if "residual" in op_type:
        # Fused residual-block epilogue: decode activation / binary / unary ops
        # from the op_type string.
        if "hardswish" in op_type:
            activation = "cutlass::epilogue::thread::HardSwish"
        elif "silu" in op_type:
            activation = "cutlass::epilogue::thread::SiLu"
        elif "sigmoid" in op_type:
            activation = "cutlass::epilogue::thread::Sigmoid"
        elif "gelu" in op_type:
            activation = "cutlass::epilogue::thread::GELU"
        elif "relu" in op_type:
            activation = "cutlass::epilogue::thread::ReLu"
        else:
            activation = "cutlass::epilogue::thread::Identity"

        binary_op = "cutlass::multiplies" if "residual_multiply" in op_type else "cutlass::plus"
        unary_op = (
            "cutlass::epilogue::thread::ReLu"
            if op_type.endswith("relu")
            else "cutlass::epilogue::thread::Identity"
        )
        residual_block_info = {
            "activation": activation,
            "binary_op": binary_op,
            "unary_op": unary_op,
        }
        epilogue = EpilogueFunctor.LinearCombinationResidualBlock
        no_beta_scaling = False
    else:
        residual_block_info = None
        epilogue, no_beta_scaling = EPILOGUE_MAP[op_type]

    op = GemmOperation(
        tile_description.minimum_compute_capability,
        tile_description,
        A,
        B,
        C,
        element_epilogue,
        epilogue,
        swizzling_functor,
    )

    return (
        op.procedural_name(),
        EmitGemmInstance().emit(
            op,
            no_beta_scaling=no_beta_scaling,
            batched=batched,
            residual_block_info=residual_block_info,
        ),
    )


def enumerate_gemm_operators(
    tile_descriptions,
    data_type,
    alignment_constraints,
    swizzling_functor=SwizzlingFunctor.Identity8,
    layout_b=LayoutType.ColumnMajor,
):
    """Exhaustively instantiate all kernels from a given configuration.

    Returns a list of dicts, one per candidate kernel, each holding the
    profiler source (``src``), the kernel ``name``, and the configuration
    needed to re-instantiate it later.
    """
    ret = []
    kernel_emitter = EmitGemmInstance()
    profiler_emitter = GemmProfilerEmitter()

    element_a, element_b, element_c, element_epilogue = data_type

    for tile_description in tile_descriptions:
        for alignment in alignment_constraints:
            A = TensorDescription(element_a, LayoutType.RowMajor, alignment)
            B = TensorDescription(element_b, layout_b, alignment)
            C = TensorDescription(element_c, LayoutType.RowMajor, alignment)

            # NOTE(review): this mutates the shared tile_description in place, so
            # the clamp persists across later iterations — confirm intended.
            if element_c == DataType.s32 and A.alignment == 1:
                tile_description.threadblock_shape[0] = min(
                    tile_description.threadblock_shape[0], 128
                )
                tile_description.threadblock_shape[1] = min(
                    tile_description.threadblock_shape[1], 128
                )

            op = GemmOperation(
                tile_description.minimum_compute_capability,
                tile_description,
                A,
                B,
                C,
                element_epilogue,
                EpilogueFunctor.LinearCombination,
                swizzling_functor,
            )

            src = profiler_emitter.emit(
                op.procedural_name(),
                kernel_emitter.emit(op, batched=False),
                DataTypeTag[element_a],
                DataTypeTag[element_b],
                DataTypeTag[element_c],
                op.leading_dim(),
            )

            ret.append(
                {
                    "src": src,
                    "op": op,
                    "name": op.procedural_name(),
                    "tile_description": tile_description,
                    "alignment": alignment,
                    "data_type": data_type,
                    "swizzle_functor": swizzling_functor,
                }
            )

    return ret


# TODO(masahi): A sensible way to pick reasonable default kernels
DEFAULT_KERNELS = {
    75: {
        ("float16", "float16"): "cutlass_tensorop_h1688gemm_128x64_32x2_tn_align1",
        ("float16", "float32"): "cutlass_tensorop_s1688gemm_f16_64x64_32x2_tn_align1",
    },
    # align1 variants do not seem to be available for sm80
    80: {
        ("float16", "float16"): "cutlass_tensorop_h1688gemm_128x64_32x2_tn_align1",
        ("float16", "float32"): "cutlass_tensorop_s1688gemm_f16_64x64_32x2_tn_align1",
        # two kernels for tf32 and 3xtf32
        ("float32", "float32"): (
            "cutlass_tensorop_s1688gemm_128x64_32x3_tn_align1",
            "cutlass_tensorop_s1688gemm_64x64_16x3_tn_align1",
        ),
    },
}


class CutlassGemmProfiler:
    """Profile all candidate kernels and select the best one."""

    def __init__(self, sm, cutlass_path, binary_path):
        assert sm in GENERATOR_FUNC_TABLE and sm in DEFAULT_KERNELS, f"sm{sm} not supported yet."
        self.engine = ProfilerEngine(sm, cutlass_path, binary_path)
        self.sm = sm
        self.cache_path = os.path.join(binary_path, "cutlass_gemm_cache.pickle")
        if os.path.exists(self.cache_path):
            # Use a context manager so the cache file handle is closed promptly
            # (the original `pickle.load(open(...))` leaked the handle).
            with open(self.cache_path, "rb") as cache_file:
                self.cache = pickle.load(cache_file)
        else:
            self.cache = {}

    def get_default(
        self,
        op_type,
        out_dtype,
        arg0_dtype,
        arg1_dtype,
        use_3xtf32=True,
        batched=False,
        layout_b=LayoutType.ColumnMajor,
    ):
        """Return the default kernel for the requested architecture.

        For now, the default kernel was picked arbitrarily (see DEFAULT_KERNELS).
        """
        ops = GENERATOR_FUNC_TABLE[self.sm](
            out_dtype,
            arg0_dtype,
            arg1_dtype,
            partial(enumerate_gemm_operators, layout_b=layout_b),
            lambda align: align == 1,  # Only request align1 kernels
            use_3xtf32,
            profile_all_alignments=True,  # To include all align1 kernels
            # TODO(masahi): Investigate when fp32 accumulation is needed for gemm
            accumlator_dtype=out_dtype,
        )

        default_kernel_name = DEFAULT_KERNELS[self.sm][(arg0_dtype, out_dtype)]

        if arg0_dtype == "float32":
            # The float32 entry is a (tf32, 3xtf32) pair — pick the matching one.
            default_kernel_name = (
                default_kernel_name[0] if not use_3xtf32 else default_kernel_name[1]
            )

        filtered = list(filter(lambda op: op["name"] == default_kernel_name, ops))
        assert len(filtered) == 1
        op = filtered[0]
        name, opdef = create_gemm_operator_with_epilogue(
            op_type,
            op["tile_description"],
            op["data_type"],
            op["alignment"],
            op["swizzle_functor"],
            batched=batched,
            layout_b=layout_b,
        )
        op.update({"name": name, "opdef": opdef})
        return op

    def select_op(
        self,
        M,
        N,
        K,
        out_dtype,
        arg0_dtype,
        arg1_dtype,
        use_3xtf32,
        profile_all_alignments=False,
        find_first_valid=False,
        use_multiprocessing=False,
        layout_b=LayoutType.ColumnMajor,
    ):
        """
        Profile and select the best kernel from candidate kernels.
        See the documentation for the profile method below.
        """
        if (M, N, K) in self.cache:
            op = self.cache[(M, N, K)]
            return op

        # TODO(masahi): CUTLASS alignment check on gemm kernels is too restrictive.
        # See https://github.com/NVIDIA/cutlass/issues/362.
        # When the above issue is resolved, we can remove the alignment check on M below.

        ops = GENERATOR_FUNC_TABLE[self.sm](
            out_dtype,
            arg0_dtype,
            arg1_dtype,
            partial(enumerate_gemm_operators, layout_b=layout_b),
            lambda align: all(dim % align == 0 for dim in (M, N, K)),
            use_3xtf32,
            profile_all_alignments=profile_all_alignments,
            # TODO(masahi): Investigate when fp32 accumulation is needed for gemm
            accumlator_dtype=out_dtype,
        )

        if not find_first_valid:
            self.engine.compile_all(ops, use_multiprocessing)

        for op in ops:
            out = self.engine.evaluate(op, [M, N, K])
            op["runtime"] = out
            if out < float("inf") and find_first_valid:
                # First applicable kernel wins; cached in memory only.
                self.cache[(M, N, K)] = op
                return op

        op = min(ops, key=lambda i: i["runtime"])
        self.cache[(M, N, K)] = op
        with open(self.cache_path, "wb") as f:
            pickle.dump(self.cache, f)
        return op

    def profile(
        self,
        op_type,
        M,
        N,
        K,
        out_dtype,
        arg0_dtype,
        arg1_dtype,
        use_3xtf32=True,
        profile_all_alignments=False,
        find_first_valid=False,
        use_multiprocessing=False,
        batched=False,
        layout_b=LayoutType.ColumnMajor,
    ):
        """Profile and select the best kernel from candidate kernels.

        If find_first_valid is True, return immediately after the first applicable
        kernel is found.
        If use_multiprocessing is True, compile all profiler executables in parallel.

        Returns ``(kernel_name, kernel_definition, runtime)``.
        """
        op = self.select_op(
            M,
            N,
            K,
            out_dtype,
            arg0_dtype,
            arg1_dtype,
            use_3xtf32,
            profile_all_alignments=profile_all_alignments,
            find_first_valid=find_first_valid,
            use_multiprocessing=use_multiprocessing,
            layout_b=layout_b,
        )

        name, opdef = create_gemm_operator_with_epilogue(
            op_type,
            op["tile_description"],
            op["data_type"],
            op["alignment"],
            op["swizzle_functor"],
            batched=batched,
            layout_b=layout_b,
        )

        return name, opdef, op["runtime"]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+# pylint: disable=invalid-name +"""Common functions and classes for CUTLASS GEMM and Conv2d geneator.""" +import logging +import math +import multiprocessing +import os +import re +import subprocess +import tempfile + +import tvm._ffi +from tvm.runtime import Object +from tvm.tir import IntImm + +from . import _ffi_api as ffi +from .attention_operation import ( + instantiate_attention_template, + instantiate_flash_attention_template, + instantiate_flash_attention_var_len_template, +) +from .conv2d_operation import instantiate_conv2d_template +from .gemm_operation import instantiate_gemm_template, emit_fp16A_intB_matmul +from .layer_norm_operation import instantiate_layer_norm_template +from .rms_norm_operation import instantiate_rms_norm_template +from .library import ( + DataType, + DataTypeSize, + DataTypeTag, + EpilogueFunctor, + MathInstruction, + MathOperation, + OpcodeClass, + TileDescription, +) + +logger = logging.getLogger("cutlass") + + +dtype_map = { + "int8": DataType.s8, + "uint8": DataType.u8, + "int32": DataType.s32, + "float32": DataType.f32, + "float16": DataType.f16, +} + + +def generate_tensor_op_common( + math_instructions, alignment_constraints, get_tile_descriptions, op_creator +): + """Common kernel generator to be used by archtecture specific generators.""" + ops = [] + for math_inst in math_instructions: + tile_descriptions = get_tile_descriptions(math_inst) + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_c, + math_inst.element_accumulator, + ] + + out = op_creator(tile_descriptions, data_type, alignment_constraints) + + ops.extend(out) + + return ops + + +def generate_sm50_simt(out_dtype, arg0_dtype, arg1_dtype, op_creator, accumulator_dtype="float32"): + """Gemerate GEMM or Conv2D SIMT kernels""" + # pylint: disable=unused-argument + min_cc = 50 + max_cc = 1024 + if arg0_dtype == "float32" and arg1_dtype == "float32": + assert out_dtype == "float32" and accumulator_dtype == "float32" + math_instructions 
= [ + MathInstruction( + [1, 1, 1], + DataType.f32, + DataType.f32, + DataType.f32, + DataType.f32, + OpcodeClass.Simt, + MathOperation.multiply_add, + ) + ] + alignment_constraints = [1] + tile_descriptions = [ + ([128, 128, 8], 2, [4, 2, 1], min_cc, max_cc), + ([128, 64, 8], 2, [2, 2, 1], min_cc, max_cc), + ([64, 128, 8], 2, [2, 2, 1], min_cc, max_cc), + ([64, 64, 8], 2, [2, 1, 1], min_cc, max_cc), + ([128, 32, 8], 2, [2, 1, 1], min_cc, max_cc), + ([32, 128, 8], 2, [1, 2, 1], min_cc, max_cc), + ] + + def get_tile_descriptions(math_inst): + return [ + TileDescription(threadblock_shape, stages, warp_count, math_inst, min_cc, max_cc) + for threadblock_shape, stages, warp_count, min_cc, max_cc in tile_descriptions + ] + + return generate_tensor_op_common( + math_instructions, alignment_constraints, get_tile_descriptions, op_creator + ) + else: + raise NotImplementedError() + + +def generate_sm75_tensor_op_1688( + out_dtype, + arg0_dtype, + arg1_dtype, + op_creator, + check_align, + _, + profile_all_alignments=False, + accumlator_dtype="float32", +): + """Generate GEMM or Conv2D kernels for Turing.""" + assert out_dtype in ["float32", "float16", "int32"] + min_cc = 75 + max_cc = 1024 + + if arg0_dtype == "float16" and arg1_dtype == "float16": + math_instructions = [ + MathInstruction( + [16, 8, 8], + DataType.f16, + DataType.f16, + dtype_map[out_dtype], + dtype_map[accumlator_dtype], + OpcodeClass.TensorOp, + MathOperation.multiply_add, + ) + ] + alignment_constraints = [8, 4, 2, 1] + tile_descriptions = [ + ([256, 128, 32], 2, [4, 2, 1], min_cc, max_cc), + ([128, 256, 32], 2, [2, 4, 1], min_cc, max_cc), + ([128, 128, 32], 2, [2, 2, 1], min_cc, max_cc), + ([64, 128, 32], 2, [2, 2, 1], min_cc, max_cc), + ([128, 64, 32], 2, [2, 2, 1], min_cc, max_cc), + ([64, 64, 32], 2, [2, 2, 1], min_cc, max_cc), + ([64, 128, 64], 2, [1, 2, 2], min_cc, max_cc), + ] + + elif "int8" in arg0_dtype and "int8" in arg1_dtype: + assert out_dtype == "int32" + math_instructions = [ + 
def generate_sm80_tensor_op_16816(
    out_dtype,
    arg0_dtype,
    arg1_dtype,
    op_creator,
    check_align,
    use_3xtf32=True,
    profile_all_alignments=False,
    accumlator_dtype="float32",
):
    """Generate GEMM or Conv2D kernels for Ampere.

    Dispatches on the input dtypes to pick a TensorOp math instruction
    (fp16 16x8x16, fp32/TF32 16x8x8, or int8 16x8x32) together with a set
    of threadblock tile candidates, then emits kernels via ``op_creator``.
    For non-fp32 inputs the sm75 generator is also invoked so its kernels
    are profiled alongside the sm80 ones.

    Parameters
    ----------
    out_dtype, arg0_dtype, arg1_dtype : str
        Output and input dtype names.  When the inputs are not fp16/fp32,
        ``out_dtype`` must be "int32" (asserted below).
    op_creator : callable
        Callback that materializes concrete operations from the kernel
        definitions (passed through to ``generate_tensor_op_common``).
    check_align : callable
        Predicate that decides whether a candidate alignment is usable
        for the problem at hand.
    use_3xtf32 : bool
        For fp32 x fp32, use the 3xTF32 fast-accurate math operation and a
        dedicated tile list instead of plain multiply-add.
    profile_all_alignments : bool
        When False, only the first (largest) feasible alignment is kept.
    accumlator_dtype : str
        Accumulator dtype name (spelling kept for API compatibility).
    """
    # Kernels below require at least sm80; max_cc of 1024 means "no upper bound".
    min_cc = 80
    max_cc = 1024
    # Tiles with a large K extent need more shared memory than later
    # architectures provide per block, so they are pinned to exactly sm80.
    max_cc_smem_limited = 80

    def get_default_tile_descriptions(block_k_factor):
        # block_k_factor scales the K extent of every threadblock tile
        # (1 for fp16, 0.5 for fp32 without 3xTF32, 2 for int8).
        return [
            ([128, 256, int(32 * block_k_factor)], 3, [2, 4, 1], min_cc, max_cc),
            ([256, 128, int(32 * block_k_factor)], 3, [4, 2, 1], min_cc, max_cc),
            ([256, 64, int(32 * block_k_factor)], 3, [4, 1, 1], min_cc, max_cc),
            ([256, 64, int(32 * block_k_factor)], 4, [4, 1, 1], min_cc, max_cc),
            ([64, 256, int(32 * block_k_factor)], 4, [1, 4, 1], min_cc, max_cc),
            ([128, 128, int(32 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
            ([128, 128, int(32 * block_k_factor)], 4, [2, 2, 1], min_cc, max_cc),
            ([128, 128, int(32 * block_k_factor)], 5, [2, 2, 1], min_cc, max_cc),
            ([128, 64, int(32 * block_k_factor)], 6, [2, 2, 1], min_cc, max_cc),
            ([64, 128, int(32 * block_k_factor)], 6, [2, 2, 1], min_cc, max_cc),
            ([64, 64, int(32 * block_k_factor)], 10, [2, 2, 1], min_cc, max_cc),
            ([256, 128, int(64 * block_k_factor)], 3, [4, 2, 1], min_cc, max_cc_smem_limited),
            ([128, 256, int(64 * block_k_factor)], 3, [2, 4, 1], min_cc, max_cc_smem_limited),
            ([256, 64, int(64 * block_k_factor)], 4, [4, 1, 1], min_cc, max_cc_smem_limited),
            ([64, 256, int(64 * block_k_factor)], 4, [1, 4, 1], min_cc, max_cc_smem_limited),
            ([128, 128, int(64 * block_k_factor)], 4, [2, 2, 1], min_cc, max_cc),
            ([256, 64, int(64 * block_k_factor)], 3, [4, 1, 1], min_cc, max_cc),
            ([64, 256, int(64 * block_k_factor)], 3, [1, 4, 1], min_cc, max_cc),
            ([128, 128, int(64 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
            ([128, 64, int(64 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
            ([64, 128, int(64 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
            ([64, 64, int(64 * block_k_factor)], 5, [2, 2, 1], min_cc, max_cc),
        ]

    if arg0_dtype == "float16" and arg1_dtype == "float16":
        math_instructions = [
            MathInstruction(
                [16, 8, 16],
                DataType.f16,
                DataType.f16,
                dtype_map[out_dtype],
                dtype_map[accumlator_dtype],
                OpcodeClass.TensorOp,
                MathOperation.multiply_add,
            )
        ]
        alignment_constraints = [8, 4, 2]
        tile_descriptions = get_default_tile_descriptions(1)
    elif arg0_dtype == "float32" and arg1_dtype == "float32":
        math_instructions = [
            MathInstruction(
                [16, 8, 8],
                DataType.f32,
                DataType.f32,
                DataType.f32,
                DataType.f32,
                OpcodeClass.TensorOp,
                MathOperation.multiply_add_fast_f32 if use_3xtf32 else MathOperation.multiply_add,
            )
        ]
        alignment_constraints = [4, 2, 1]

        if use_3xtf32:
            # tf32
            tile_descriptions = [
                ([128, 128, 16], 4, [4, 2, 1], min_cc, max_cc),
                ([128, 128, 16], 3, [4, 2, 1], min_cc, max_cc),
                ([256, 64, 16], 3, [4, 2, 1], min_cc, max_cc),
                ([64, 256, 16], 3, [2, 4, 1], min_cc, max_cc),
                ([128, 64, 16], 4, [2, 2, 1], min_cc, max_cc),
                ([64, 128, 16], 4, [2, 2, 1], min_cc, max_cc),
                ([64, 64, 16], 3, [2, 2, 1], min_cc, max_cc),
                ([128, 128, 32], 3, [4, 2, 1], min_cc, max_cc),
                ([256, 64, 32], 3, [4, 2, 1], min_cc, max_cc_smem_limited),
                ([64, 256, 32], 3, [2, 4, 1], min_cc, max_cc_smem_limited),
                ([128, 64, 32], 3, [2, 2, 1], min_cc, max_cc),
                ([64, 128, 32], 3, [2, 2, 1], min_cc, max_cc),
                ([64, 64, 32], 3, [2, 2, 1], min_cc, max_cc),
            ]
        else:
            tile_descriptions = get_default_tile_descriptions(0.5)
    else:
        # int8/uint8 inputs: accumulate and saturate into int32.
        assert out_dtype == "int32"
        math_instructions = [
            MathInstruction(
                [16, 8, 32],
                dtype_map[arg0_dtype],
                dtype_map[arg1_dtype],
                DataType.s32,
                DataType.s32,
                OpcodeClass.TensorOp,
                MathOperation.multiply_add_saturate,
            )
        ]
        alignment_constraints = [16, 8, 4]
        tile_descriptions = get_default_tile_descriptions(2)

    def get_tile_descriptions(math_inst):
        # NOTE: min_cc/max_cc in the comprehension come from each tile tuple,
        # deliberately shadowing the enclosing function's values.
        return [
            TileDescription(threadblock_shape, stages, warp_count, math_inst, min_cc, max_cc)
            for threadblock_shape, stages, warp_count, min_cc, max_cc in tile_descriptions
        ]

    # Drop alignments the problem cannot satisfy; unlike the sm75 path, an
    # empty result here is tolerated (sm80 kernel generation is skipped below).
    alignment_constraints = [align for align in alignment_constraints if check_align(align)]

    if len(alignment_constraints) > 0 and not profile_all_alignments:
        alignment_constraints = [alignment_constraints[0]]

    if arg0_dtype != "float32" and arg1_dtype != "float32":
        # Also profile sm75 kernels, which remain valid on sm80 hardware.
        sm75_kernels = generate_sm75_tensor_op_1688(
            out_dtype,
            arg0_dtype,
            arg1_dtype,
            op_creator,
            check_align,
            False,
            profile_all_alignments,
            accumlator_dtype=accumlator_dtype,
        )
    else:
        # TF32 (float32 + float32 case) is only supported on sm80
        sm75_kernels = []

    if len(alignment_constraints) > 0:
        sm80_kernels = generate_tensor_op_common(
            math_instructions, alignment_constraints, get_tile_descriptions, op_creator
        )
    else:
        sm80_kernels = []

    # TODO(masahi): For int8 kernels, The CUTLASS generator modifies the output tensor alignment
    # after ops are created. Revisit how important this modification is.
    # for op in operations:
    #     if op.tile_description.threadblock_shape[1] >= 128:
    #         op.C.alignment = 16
    #     else:
    #         op.C.alignment = 8

    return sm75_kernels + sm80_kernels
class ProfilerEngine:
    """Compile and run a given profiler executable.

    Generated profiler sources are compiled with nvcc into ``binary_prefix``
    and then executed to measure kernel runtimes; a failed compile or run is
    reported as an infinite runtime so selection simply skips the kernel.
    """

    def __init__(self, cuda_arch, cutlass_path, binary_prefix):
        self.cuda_arch = cuda_arch
        self.binary_prefix = binary_prefix
        self.cutlass = cutlass_path
        # nvcc flags: CUTLASS headers, C++17, tensor-core MMA, and fat-binary
        # code for the requested architecture (SASS + PTX).
        self.cflags = f"-I{cutlass_path}/include -I{cutlass_path}/tools/util/include -O3 -std=c++17"
        self.cflags += " -DCUTLASS_ENABLE_TENSOR_CORE_MMA=1"
        self.cflags += (
            f" -gencode=arch=compute_{cuda_arch},code=[sm_{cuda_arch},compute_{cuda_arch}]"
        )
        self.cflags += " -Xcompiler=-Wconversion -Xcompiler=-fno-strict-aliasing"
        self.cmd = "nvcc {cflags} {src} -o {output}"

    def _compile(self, op):
        """Compile one profiler source; no-op when the binary already exists."""
        os.makedirs(self.binary_prefix, exist_ok=True)
        opath = os.path.join(self.binary_prefix, op["name"])
        if os.path.exists(opath):
            return
        # delete=False: the file must survive close() so nvcc can read it.
        fi = tempfile.NamedTemporaryFile("w", delete=False, prefix=self.binary_prefix, suffix=".cu")
        fi.write(op["src"])
        fi.close()
        cmd = self.cmd.format(cflags=self.cflags, src=fi.name, output=opath)
        logger.info("invoking compilation %s", cmd)
        os.system(cmd)
        os.unlink(fi.name)

    def compile_all(self, ops, use_multiprocessing=False):
        """Compile all profiler executables."""
        if use_multiprocessing:
            # Context manager terminates worker processes on exit; the
            # previous code created the pool and never closed it.
            with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
                pool.map(self._compile, ops)
        else:
            for op in ops:
                self._compile(op)

    def evaluate(self, op, args):
        """Run the profiler executable corresponding to op_name with args.

        Returns the measured runtime in the profiler's units, or ``inf``
        when the binary is missing, exits non-zero, or prints output that
        cannot be parsed as a float.
        """
        op_name = op["name"]
        opath = os.path.join(self.binary_prefix, op_name)
        if not os.path.exists(opath):
            self._compile(op)
        if not os.path.exists(opath):
            # Bail out if compilation fails for a whatever reason (e.g. static assert failure)
            return float("inf")
        cmd = [opath]
        for arg in args:
            cmd.append(str(arg))
        try:
            logger.info("invoking evaluation %s", cmd)
            sp = subprocess.run(cmd, capture_output=True, check=True)
            rt = float(sp.stdout)
            if rt == 0.0:
                # This seems to happen with split-k using invalid split-k-slices
                rt = float("inf")
            logger.info("%s, %f", op_name, rt)
        except (subprocess.CalledProcessError, ValueError):
            # ValueError: the profiler printed something that is not a float;
            # treat it the same as a failed run instead of crashing.
            rt = float("inf")
        return rt
+ """ + attrs = {} + + for k in ["lda", "ldb", "ldc", "cutlass_op_def", "cutlass_op_name", "op_type"]: + if k in annotations: + attrs[k] = annotations[k] + + headers = ["tvm/runtime/registry.h"] + + if "relu" in func_name: + headers.append("cutlass/epilogue/thread/linear_combination_bias_relu.h") + elif "gelu" in func_name: + headers.append("cutlass/epilogue/thread/linear_combination_gelu.h") + elif "sigmoid" in func_name: + headers.append("cutlass/epilogue/thread/linear_combination_sigmoid.h") + elif "silu" in func_name: + headers.append("cutlass/epilogue/thread/linear_combination_silu.h") + elif "hardswish" in func_name: + headers.append("cutlass/epilogue/thread/linear_combination_hardswish.h") + else: + headers.append("cutlass/epilogue/thread/linear_combination.h") + + if "residual" in func_name: + headers.append("cutlass/epilogue/thread/linear_combination_residual_block.h") + + def get_dim(shape_annot, var_name, axis_idx, batched_offset=0): + if isinstance(shape_annot, IntImm): + return str(int(shape_annot)) + return f"{var_name}->shape[{batched_offset + axis_idx}]" + + def get_batch_stride(stride_annot, arg0_idx, arg1_idx, arg0_axis_idx, arg1_axis_idx): + if isinstance(stride_annot, IntImm): + return str(int(stride_annot)) + dim1 = func_args[arg0_idx] + f"->shape[{arg0_axis_idx}]" + dim2 = func_args[arg1_idx] + f"->shape[{arg1_axis_idx}]" + return dim1 + " * " + dim2 + + def get_flattened_batch_dim(arg_name, batch_rank): + return " * ".join(["{}->shape[{}]".format(arg_name, i) for i in range(batch_rank)]) + + if "decode_matmul" in func_name: + headers.append("cutlass_kernels/fpA_intB_gemm.h") + lhs_arg_idx = _get_optional_int_annotation(annotations, "lhs_arg_idx", 0) + rhs_arg_idx = _get_optional_int_annotation(annotations, "rhs_arg_idx", 1) + scales_arg_idx = _get_optional_int_annotation(annotations, "scales_arg_idx", 2) + bias_arg_idx = _get_optional_int_annotation(annotations, "bias_arg_idx", None) + residual_arg_idx = 
_get_optional_int_annotation(annotations, "residual_arg_idx", None) + + attrs["A_arg"] = func_args[lhs_arg_idx] + attrs["B_arg"] = func_args[rhs_arg_idx] + attrs["scales_arg"] = func_args[scales_arg_idx] + attrs["activation"] = annotations.get("activation", "identity") + attrs["bias_stride"] = annotations["bias_stride"] + attrs["M"] = annotations["M"] + attrs["group_size"] = annotations["group_size"] + + if not isinstance(attrs["M"], tvm.tir.IntImm): + attrs["M"] = get_flattened_batch_dim( + func_args[lhs_arg_idx], int(annotations["batch_rank"]) + ) + + if bias_arg_idx is not None: + attrs["bias_arg"] = func_args[bias_arg_idx] + + if residual_arg_idx is not None: + attrs["residual_arg"] = func_args[residual_arg_idx] + attrs["binary_op"] = annotations["binary_op"] + attrs["unary_op"] = annotations["unary_op"] + + if annotations["weight_nbit"] == 4: + attrs["weight_dtype"] = "cutlass::uint4b_t" + attrs["float_per_int"] = 2 + else: + assert annotations["weight_nbit"] == 8 + attrs["weight_dtype"] = "uint8_t" + attrs["float_per_int"] = 1 + + code = emit_fp16A_intB_matmul(attrs) + return CodegenResult(code, headers) + + elif "dense" in func_name or "matmul" in func_name: + batched = "batch" in annotations + # dense is equal to transposed_matmul + transposed = "transposed" in func_name or "dense" in func_name + lhs_arg_idx = _get_optional_int_annotation(annotations, "lhs_arg_idx", 0) + rhs_arg_idx = _get_optional_int_annotation(annotations, "rhs_arg_idx", 1) + bias_arg_idx = _get_optional_int_annotation(annotations, "bias_arg_idx", None) + residual_arg_idx = _get_optional_int_annotation(annotations, "residual_arg_idx", None) + + lhs_arg = func_args[lhs_arg_idx] + rhs_arg = func_args[rhs_arg_idx] + lhs_shape = annotations[f"arg{lhs_arg_idx}_shape"] + rhs_shape = annotations[f"arg{rhs_arg_idx}_shape"] + lhs_batched_offset = len(lhs_shape) - 2 + rhs_batched_offset = len(rhs_shape) - 2 + + attrs["lhs_arg"] = lhs_arg + attrs["rhs_arg"] = rhs_arg + + if bias_arg_idx is not 
None: + attrs["bias_arg"] = func_args[bias_arg_idx] + if residual_arg_idx is not None: + attrs["residual_arg"] = func_args[residual_arg_idx] + + attrs["ElementInputA"] = DataTypeTag[dtype_map[annotations[f"arg{lhs_arg_idx}_dtype"]]] + attrs["ElementInputB"] = DataTypeTag[dtype_map[annotations[f"arg{rhs_arg_idx}_dtype"]]] + attrs["ElementOutput"] = DataTypeTag[dtype_map[annotations["ret_dtype"]]] + + attrs["K"] = lhs_shape[lhs_batched_offset + 1] + attrs["M"] = get_dim(lhs_shape[lhs_batched_offset], lhs_arg, 0, lhs_batched_offset) + + if transposed: + attrs["N"] = get_dim(rhs_shape[rhs_batched_offset], rhs_arg, 0, rhs_batched_offset) + else: + attrs["N"] = get_dim(rhs_shape[rhs_batched_offset + 1], rhs_arg, 1, rhs_batched_offset) + + if batched: + headers.append("cutlass/gemm/device/gemm_batched.h") + + def get_batch_on_arg(arg_name, arg_shape): + return " * ".join( + "{}->shape[{}]".format(arg_name, i) for i in range(len(arg_shape) - 2) + ) + + if isinstance(annotations["batch"], IntImm): + attrs["batch"] = str(int(annotations["batch"])) + elif annotations["batch_stride_A"] == 0: + # 2D x ND + attrs["batch"] = get_batch_on_arg(rhs_arg, rhs_shape) + else: + # ND x 2D or ND x ND + attrs["batch"] = get_batch_on_arg(lhs_arg, lhs_shape) + + attrs["batch_stride_A"] = get_batch_stride( + annotations["batch_stride_A"], + lhs_arg_idx, + lhs_arg_idx, + lhs_batched_offset, + lhs_batched_offset + 1, + ) + attrs["batch_stride_B"] = get_batch_stride( + annotations["batch_stride_B"], + rhs_arg_idx, + rhs_arg_idx, + rhs_batched_offset, + rhs_batched_offset + 1, + ) + + if transposed: + attrs["batch_stride_C"] = get_batch_stride( + annotations["batch_stride_C"], + lhs_arg_idx, + rhs_arg_idx, + lhs_batched_offset, + rhs_batched_offset, + ) + else: + attrs["batch_stride_C"] = get_batch_stride( + annotations["batch_stride_C"], + lhs_arg_idx, + rhs_arg_idx, + lhs_batched_offset, + rhs_batched_offset + 1, + ) + else: + headers.append("cutlass/gemm/device/gemm.h") + + if "residual" in 
func_name: + headers.append("cutlass/gemm/device/gemm_universal_with_broadcast.h") + + code = instantiate_gemm_template(attrs) + return CodegenResult(code, headers) + + elif "conv2d" in func_name: + data_arg_idx = _get_optional_int_annotation(annotations, "data_arg_idx", 0) + weight_arg_idx = _get_optional_int_annotation(annotations, "weight_arg_idx", 1) + bias_arg_idx = _get_optional_int_annotation(annotations, "bias_arg_idx", None) + residual_arg_idx = _get_optional_int_annotation(annotations, "residual_arg_idx", None) + + attrs["data_arg"] = func_args[data_arg_idx] + attrs["weight_arg"] = func_args[weight_arg_idx] + + if bias_arg_idx is not None: + attrs["bias_arg"] = func_args[bias_arg_idx] + if residual_arg_idx is not None: + attrs["residual_arg"] = func_args[residual_arg_idx] + + activation_shape = annotations[f"arg{data_arg_idx}_shape"] + weight_shape = annotations[f"arg{weight_arg_idx}_shape"] + output_shape = annotations["ret_shape"] + + if "conv2d_transpose" in func_name: + headers.append("cutlass/conv/kernel/default_conv2d_dgrad.h") + activation_shape = output_shape + output_shape = annotations["arg0_shape"] + elif "backward" in func_name: + headers.append("cutlass/conv/kernel/default_conv2d_wgrad.h") + activation_shape = annotations["arg1_shape"] + weight_shape = output_shape + output_shape = annotations["arg0_shape"] + elif "residual" in func_name: + headers.append("cutlass/conv/kernel/default_conv2d_fprop_with_broadcast.h") + else: + headers.append("cutlass/conv/kernel/default_conv2d_fprop.h") + + headers.append("cutlass/conv/device/implicit_gemm_convolution.h") + + op_name = attrs["cutlass_op_name"] + + if "splitk" in op_name: + headers += [ + "cutlass/reduction/device/reduce_split_k.h", + "cutlass/reduction/thread/reduction_operators.h", + ] + + data_arg = attrs["data_arg"] + attrs["N"] = get_dim(activation_shape[0], data_arg, 0) + attrs["H"] = get_dim(activation_shape[1], data_arg, 1) + attrs["W"] = get_dim(activation_shape[2], data_arg, 2) + 
attrs["C"] = activation_shape[3] + attrs["P"] = get_dim(output_shape[1], "out0", 1) + attrs["Q"] = get_dim(output_shape[2], "out0", 2) + attrs["K"] = output_shape[3] + attrs["R"] = weight_shape[1] + attrs["S"] = weight_shape[2] + attrs["pad_h"] = annotations["padding"][0] + attrs["pad_w"] = annotations["padding"][1] + attrs["stride_h"] = annotations["strides"][0] + attrs["stride_w"] = annotations["strides"][1] + attrs["dilation_h"] = annotations["dilation"][0] + attrs["dilation_w"] = annotations["dilation"][1] + + if "splitk" in op_name: + attrs["split_k_mode"] = "kParallel" + attrs["split_k_slices"] = str(re.search(r"splitk(\d+)", op_name).group(1)) + else: + attrs["split_k_mode"] = "kSerial" + attrs["split_k_slices"] = 1 + + if "residual_shape" in annotations: + attrs["residual_shape"] = annotations["residual_shape"] + + code = instantiate_conv2d_template(attrs) + return CodegenResult(code, headers) + + elif "attention" in func_name: + is_var_len = "var_len" in func_name + data_type = dtype_map[annotations["arg0_dtype"]] + + attrs["qkv_layout"] = annotations["qkv_layout"] + if attrs["qkv_layout"] == "default": + attrs["query"] = func_args[0] + attrs["key"] = func_args[1] + attrs["value"] = func_args[2] + attrs["num_queries"] = s = get_dim(annotations["num_queries"], func_args[0], 1) + attrs["num_keys"] = get_dim(annotations["num_keys"], func_args[1], 1) + if len(func_args) > 4 and not is_var_len: # +1 for workspace, the last arg + attrs["bias"] = func_args[3] + elif attrs["qkv_layout"] == "qkv_stacked": + attrs["qkv"] = func_args[0] + attrs["num_queries"] = s = annotations["num_queries"] + attrs["num_keys"] = annotations["num_keys"] + if len(func_args) > 5 and not is_var_len: # +1 for workspace, the last arg + attrs["bias"] = func_args[4] + else: + raise NotImplementedError() + + attrs["data_type"] = DataTypeTag[data_type] + attrs["num_batches"] = b = annotations["num_batches"] + attrs["head_dim"] = h = annotations["head_dim"] + attrs["head_dim_value"] = h_v = 
annotations["head_dim_value"] + attrs["kMaxK"] = max(int(attrs["head_dim"]), int(attrs["head_dim_value"])) + attrs["scale"] = ( + float(1 / math.sqrt(h.value)) if annotations["scale"] is None else annotations["scale"] + ) + + if is_var_len: + attrs["seqstart_q"] = func_args[int(annotations["seqstart_q_idx"])] + attrs["seqstart_k"] = func_args[int(annotations["seqstart_k_idx"])] + attrs["max_seqlen_q"] = func_args[int(annotations["max_seqlen_q_idx"])] + attrs["max_seqlen_k"] = func_args[int(annotations["max_seqlen_k_idx"])] + + is_mqa = annotations["num_q_heads"] != annotations["num_kv_heads"] + + use_flash = ( + annotations["ret_dtype"] == "float16" + and "bias" not in attrs + and int(attrs["head_dim"]) <= 256 + and int(attrs["head_dim"]) % 8 == 0 + and int(attrs["head_dim"]) == int(attrs["head_dim_value"]) + # For the causal case (custom mask = "BottomRight"), only use flash for multi-query + # attention workloads. Otherwise, CUTLASS fMHA seems faster for causal attention + # with a single query. + # In addition, sliding-window attention is only supported by flash. + and ( + int(annotations["custom_mask_type"]) == 0 + or (int(annotations["custom_mask_type"]) == 2 and is_mqa) + or (int(annotations["custom_mask_type"]) == 2 and "window_size" in annotations) + ) + # Flash v2 is currently not supported for sm < 80 + and int(annotations["arch"]) >= 80 + ) + + # See https://github.com/Dao-AILab/flash-attention/blob/ + # 92dd5703ecdb99aa4a4aee9817f28557907403a2/csrc/flash_attn/flash_api.cpp#L111-L116 + if "window_size" in annotations: + assert use_flash, "Sliding-window attention is supported only by Flash Attention." + assert ( + int(annotations["custom_mask_type"]) == 2 + ), "Sliding-window attention is only supported for causal with bottom right mask." 
+ attrs["window_size_left"] = int(annotations["window_size"]) - 1 + attrs["window_size_right"] = 0 + attrs["is_causal"] = False + else: + if int(annotations["custom_mask_type"]) == 2: + attrs["window_size_left"] = attrs["num_keys"] + attrs["window_size_right"] = 0 + attrs["is_causal"] = True + else: + attrs["window_size_left"] = -1 + attrs["window_size_right"] = -1 + attrs["is_causal"] = False + + if use_flash: + headers.append("flash.h") + attrs["num_q_heads"] = annotations["num_q_heads"] + attrs["num_kv_heads"] = annotations["num_kv_heads"] + + if is_var_len: + code = instantiate_flash_attention_var_len_template(attrs) + else: + code = instantiate_flash_attention_template(attrs) + else: + headers.append("kernel_forward.h") + + assert ( + not is_mqa + ), "The number of query and KV heads need to be the same for CUTLASS fMHA." + + attrs["num_heads"] = n = annotations["num_q_heads"] + + data_type_size = DataTypeSize[data_type] + if (data_type_size * h // 8) % 16 == 0 and (data_type_size * h_v // 8) % 16 == 0: + attrs["kIsAligned"] = True + elif (h % 4 == 0) and (h_v % 4 == 0): + attrs["kIsAligned"] = False + else: + raise NotImplementedError() + if h_v > 64: + attrs["kQueriesPerBlock"] = 32 + attrs["kKeysPerBlock"] = 128 + attrs["kSingleValueIteration"] = h_v <= 128 + else: + attrs["kQueriesPerBlock"] = 64 + attrs["kKeysPerBlock"] = 64 + attrs["kSingleValueIteration"] = True + + assert ( + attrs["scale"] > 0 or attrs["scale"] < 0 + ), "Cutlass may generate nan occasionally when scale == 0.0" + attrs["arch"] = "cutlass::arch::Sm{}".format(annotations["arch"]) + attrs["kSupportsDropout"] = False + + attrs["output_size"] = f"{b} * {s} * {n} * {h_v}" + + attrs["custom_mask_type"] = annotations["custom_mask_type"] + + for arg in func_args: + if "workspace" in arg: + attrs["workspace"] = arg + if "bias" in attrs: + attrs["kSupportsBias"] = True + if len(annotations["bias_shape"]) == 4: + strides = "p.num_keys" + if annotations["bias_shape"][2] == 1: + 
attrs["bias_strideM"] = 0 + else: + attrs["bias_strideM"] = strides + strides = f"p.num_queries * {strides}" + if annotations["bias_shape"][1] == 1: + attrs["bias_strideH"] = 0 + else: + attrs["bias_strideH"] = strides + strides = f"p.num_heads * {strides}" + if annotations["bias_shape"][0] == 1: + attrs["bias_strideB"] = 0 + else: + attrs["bias_strideB"] = strides + else: + raise NotImplementedError() + else: + # To support negative scale in current Cutlass implementation, + # kSupportsBias should be set true, or there are nan's as result. + attrs["kSupportsBias"] = attrs["scale"] < 0 + + code = instantiate_attention_template(attrs) + + return CodegenResult(code, headers) + elif "layer_norm" in func_name: + headers.append("cutlass/util/device_layernorm.h") + headers.append("cutlass/layout/matrix.h") + attrs = {"input": func_args[0], "gamma": func_args[1], "beta": func_args[2]} + attrs.update(dict(annotations)) + + if not isinstance(attrs["M"], tvm.tir.IntImm): + attrs["M"] = get_flattened_batch_dim(func_args[0], int(attrs["batch_rank"])) + + code = instantiate_layer_norm_template(attrs) + return CodegenResult(code, headers) + elif "rms_norm" in func_name: + headers.append("cutlass/util/device_rmsnorm.h") + headers.append("cutlass/layout/matrix.h") + attrs = {"input": func_args[0], "weight": func_args[1]} + attrs.update(dict(annotations)) + + if not isinstance(attrs["M"], tvm.tir.IntImm): + attrs["M"] = get_flattened_batch_dim(func_args[0], int(attrs["batch_rank"])) + + code = instantiate_rms_norm_template(attrs) + return CodegenResult(code, headers) + + raise ValueError(f"Do not have a template for {func_name}") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/layer_norm_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/layer_norm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..760ddec3bfc88303920b10e35ef97dd96e9cb2b8 --- /dev/null +++ 
def instantiate_layer_norm_template(attrs):
    """
    Return CUTLASS host code for layer norm based on
    a template and the provided attribute map.

    ``attrs`` must provide ``data_type``, ``M``, ``N`` and the argument
    names ``input``, ``gamma`` and ``beta``; placeholders of the form
    ``${key}`` are expanded by ``substitute_template``.
    """
    # NOTE: the <data_type, RowMajor> / <cudaStream_t> template arguments are
    # required for the emitted C++ to compile; they were missing in the
    # previous revision of this template.
    template = """
  using data_type = ${data_type};
  using namespace cutlass::layout;

  int M = ${M};
  int N = ${N};
  cutlass::MatrixCoord size(M, N);
  auto layout_2D = RowMajor::packed(size);
  auto layout_channels = RowMajor::packed({1, N});

  cutlass::TensorRef<data_type, RowMajor> _input((data_type*)${input}->data, layout_2D);
  cutlass::TensorRef<data_type, RowMajor> _gamma((data_type*)${gamma}->data, layout_channels);
  cutlass::TensorRef<data_type, RowMajor> _beta((data_type*)${beta}->data, layout_channels);
  cutlass::TensorRef<data_type, RowMajor> _output((data_type*)out0->data, layout_2D);

  auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream");
  ICHECK(func != nullptr);
  cudaStream_t stream = static_cast<cudaStream_t>((*func)().operator void*());

  cutlass::layernorm(size, _output, _input, _gamma, _beta, stream);
"""
    return substitute_template(template, attrs)
See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name,line-too-long +"""Various type definitions to help instantiate CUTLASS kernels.""" +import re +import enum +from enum import auto as enum_auto + +from tvm.tir.expr import IntImm, FloatImm + + +class GeneratorTarget(enum.Enum): + Library = enum_auto() + + +class DataType(enum.Enum): + f16 = enum_auto() + f32 = enum_auto() + s8 = enum_auto() + u8 = enum_auto() + s32 = enum_auto() + + +ShortDataTypeNames = {DataType.f16: "h", DataType.f32: "s", DataType.s32: "i"} + + +DataTypeNames = { + DataType.f16: "f16", + DataType.f32: "f32", + DataType.s8: "s8", + DataType.u8: "u8", + DataType.s32: "s32", +} + +DataTypeTag = { + DataType.f16: "cutlass::half_t", + DataType.f32: "float", + DataType.s8: "int8_t", + DataType.s32: "int32_t", + DataType.u8: "uint8_t", +} + +DataTypeSize = { + DataType.f16: 16, + DataType.f32: 32, + DataType.u8: 8, + DataType.s8: 8, + DataType.s32: 32, +} + + +class MathOperation(enum.Enum): + multiply_add = enum_auto() + multiply_add_saturate = enum_auto() + multiply_add_fast_f32 = enum_auto() + + +MathOperationTag = { + MathOperation.multiply_add: "cutlass::arch::OpMultiplyAdd", + MathOperation.multiply_add_saturate: "cutlass::arch::OpMultiplyAddSaturate", + MathOperation.multiply_add_fast_f32: "cutlass::arch::OpMultiplyAddFastF32", +} + + +class LayoutType(enum.Enum): + ColumnMajor = enum_auto() + RowMajor = enum_auto() + TensorNHWC = enum_auto() + + +LayoutTag = { + LayoutType.ColumnMajor: "cutlass::layout::ColumnMajor", + LayoutType.RowMajor: "cutlass::layout::RowMajor", + LayoutType.TensorNHWC: "cutlass::layout::TensorNHWC", +} + + +TransposedLayout = { + LayoutType.ColumnMajor: LayoutType.RowMajor, + LayoutType.RowMajor: LayoutType.ColumnMajor, + LayoutType.TensorNHWC: LayoutType.TensorNHWC, +} + + +ShortLayoutTypeNames = { + LayoutType.ColumnMajor: "n", + LayoutType.RowMajor: "t", + LayoutType.TensorNHWC: 
def substitute_template(template, values):
    """Instantiate a kernel template using `values`.

    Every occurrence of ``${key}`` in ``template`` is replaced by the
    corresponding value.  Substitution repeats until a full pass makes no
    change, so replacement strings that themselves contain ``${...}``
    placeholders are expanded as well.

    bool values render as ``true``/``false``; int/IntImm and float/FloatImm
    values render via ``str(int(...))`` / ``str(float(...))``; everything
    else must already be a string.
    """
    text = template
    changed = True
    while changed:
        changed = False
        for key, value in values.items():
            # Check bool BEFORE int: bool is a subclass of int, so testing
            # int first would render True/False as "1"/"0" and leave the
            # bool branch unreachable (the bug in the previous revision).
            if isinstance(value, bool):
                value = str(value).lower()
            elif isinstance(value, (int, IntImm)):
                value = str(int(value))
            elif isinstance(value, (float, FloatImm)):
                value = str(float(value))
            regex = f"\\$\\{{{key}\\}}"
            newtext = re.sub(regex, value, text)
            if newtext != text:
                changed = True
                text = newtext
    return text
EpilogueFunctor.LinearCombinationGelu: "cutlass::epilogue::thread::LinearCombinationGELU", + EpilogueFunctor.LinearCombinationSigmoid: "cutlass::epilogue::thread::LinearCombinationSigmoid", + EpilogueFunctor.LinearCombinationSilu: "cutlass::epilogue::thread::LinearCombinationSilu", + EpilogueFunctor.LinearCombinationHardSwish: "cutlass::epilogue::thread::LinearCombinationHardSwish", + EpilogueFunctor.LinearCombinationResidualBlock: "cutlass::epilogue::thread::LinearCombinationResidualBlock", +} + + +class SwizzlingFunctor(enum.Enum): + Identity1 = enum_auto() + Identity2 = enum_auto() + Identity4 = enum_auto() + Identity8 = enum_auto() + Batched = enum_auto() + StridedDgradIdentity1 = enum_auto() + StridedDgradIdentity4 = enum_auto() + + +SwizzlingFunctorTag = { + SwizzlingFunctor.Identity1: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>", + SwizzlingFunctor.Identity2: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>", + SwizzlingFunctor.Identity4: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>", + SwizzlingFunctor.Identity8: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>", + SwizzlingFunctor.Batched: "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle", + SwizzlingFunctor.StridedDgradIdentity1: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>", + SwizzlingFunctor.StridedDgradIdentity4: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>", +} + + +class ConvKind(enum.Enum): + Fprop = enum_auto() + Dgrad = enum_auto() + Wgrad = enum_auto() + + +ConvKindTag = { + ConvKind.Fprop: "cutlass::conv::Operator::kFprop", + ConvKind.Dgrad: "cutlass::conv::Operator::kDgrad", + ConvKind.Wgrad: "cutlass::conv::Operator::kWgrad", +} + + +ConvKindNames = {ConvKind.Fprop: "fprop", ConvKind.Dgrad: "dgrad", ConvKind.Wgrad: "wgrad"} + + +class StrideSupport(enum.Enum): + Strided = enum_auto() + Unity = enum_auto() + + +StrideSupportTag = { + StrideSupport.Strided: 
"cutlass::conv::StrideSupport::kStrided", + StrideSupport.Unity: "cutlass::conv::StrideSupport::kUnity", +} + + +StrideSupportNames = {StrideSupport.Strided: "", StrideSupport.Unity: "unity_stride"} + + +class IteratorAlgorithm(enum.Enum): + Analytic = enum_auto() + Optimized = enum_auto() + + +IteratorAlgorithmTag = { + IteratorAlgorithm.Analytic: "cutlass::conv::IteratorAlgorithm::kAnalytic", + IteratorAlgorithm.Optimized: "cutlass::conv::IteratorAlgorithm::kOptimized", +} + + +IteratorAlgorithmNames = { + IteratorAlgorithm.Analytic: "analytic", + IteratorAlgorithm.Optimized: "optimized", +} + + +class MathInstruction: + """Describe characteristics of a math instruction.""" + + def __init__( + self, + instruction_shape, + element_a, + element_b, + element_c, + element_accumulator, + opcode_class, + math_operation=MathOperation.multiply_add, + ): + self.instruction_shape = instruction_shape + self.element_a = element_a + self.element_b = element_b + self.element_c = element_c + self.element_accumulator = element_accumulator + self.opcode_class = opcode_class + self.math_operation = math_operation + + +class TileDescription: + """Describe characteristics of a GEMM tile.""" + + def __init__( + self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute + ): + self.threadblock_shape = threadblock_shape + self.stages = stages + self.warp_count = warp_count + self.math_instruction = math_instruction + self.minimum_compute_capability = min_compute + self.maximum_compute_capability = max_compute + + def procedural_name(self): + return "%dx%d_%dx%d" % ( + self.threadblock_shape[0], + self.threadblock_shape[1], + self.threadblock_shape[2], + self.stages, + ) + + +class TensorDescription: + def __init__(self, element, layout, alignment=1): + self.element = element + self.layout = layout + self.alignment = alignment diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/rms_norm_operation.py 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/rms_norm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..9b10620fd4031f56190ca415d7011351618db634 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/cutlass/rms_norm_operation.py @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Generator for CUTLASS rms norm kernels.""" +from .library import substitute_template + + +def instantiate_rms_norm_template(attrs): + """ + Return CUTLASS host code for rms norm based on + a template and the provided attribute map. 
+ """ + template = """ + using data_type = ${data_type}; + using namespace cutlass::layout; + + int M = ${M}; + int N = ${N}; + cutlass::MatrixCoord size(M, N); + auto layout_2D = RowMajor::packed(size); + auto layout_channels = RowMajor::packed({1, N}); + + cutlass::TensorRef _input((data_type*)${input}->data, layout_2D); + cutlass::TensorRef _weight((data_type*)${weight}->data, layout_channels); + cutlass::TensorRef _output((data_type*)out0->data, layout_2D); + + auto func = tvm::runtime::Registry::Get("runtime.get_cuda_stream"); + ICHECK(func != nullptr); + cudaStream_t stream = static_cast((*func)().operator void*()); + + cutlass::rmsnorm(size, _output, _input, _weight, stream, ${rms_eps}); + """ + return substitute_template(template, attrs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13a83393a9124bf6ec36540556b4808abd47e206 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08862b69cbabb2d02bf9d106ad74fdd887a3f435 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7065cb65c5128f0719b7bb9026e9e45e62e965c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_result.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_result.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06805830c0b561f323ac77e80a65a3b872890940 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_result.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_runtime.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_runtime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12bff0f2a3b65f8b85d8dc604e83887be50f67fb Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/__pycache__/debug_runtime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_executor.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..785959ce8dd789387240275d9335c54dbda561b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_executor.py @@ -0,0 +1,512 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Graph debug runtime executes TVM debug packed functions.""" + +import logging +import os +import shutil +import struct +import tempfile + +import tvm._ffi +from tvm._ffi.base import string_types +from tvm.contrib import graph_executor +from tvm.runtime.module import BenchmarkResult + +from ...runtime.profiling import Report +from . import debug_result + +_DUMP_ROOT_PREFIX = "tvmdbg_" +_DUMP_PATH_PREFIX = "_tvmdbg_" + + +def create(graph_json_str, libmod, device, dump_root=None): + """Create a runtime executor module given a graph and module. 
+ + Parameters + ---------- + graph_json_str : str + The graph to be deployed in json format output by graph compiler. + The graph can contain operator(tvm_op) that points to the name + of PackedFunc in the libmod. + + libmod : tvm.Module + The module of the corresponding function. + + device : Device + The device to deploy the module, can be local or remote. + + dump_root : str + To select which folder the outputs should be kept. + None will make a temp folder in /tmp/tvmdbg and does the dumping + Returns + ------- + graph_module : GraphModuleDebug + Debug Runtime graph module that can be used to execute the graph. + """ + assert isinstance(graph_json_str, string_types) + + try: + dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device) + if num_rpc_dev == len(dev): + fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor_debug.create") + else: + fcreate = tvm._ffi.get_global_func("tvm.graph_executor_debug.create") + except ValueError: + raise ValueError( + "Please set '(USE_PROFILER ON)' in " "config.cmake and rebuild TVM to enable debug mode" + ) + func_obj = fcreate(graph_json_str, libmod, *device_type_id) + gmod = GraphModuleDebug(func_obj, dev, graph_json_str, dump_root) + + # Automatically set params if they can be extracted from the libmod + try: + params = libmod["get_graph_params"]() + if isinstance(params, tvm.ir.container.Map): + gmod.set_input(**params) + except (AttributeError, tvm.error.RPCError): + # Params can not be extracted from the libmod and must be set somewhere else manually + # Do not set params during RPC communication + pass + + return gmod + + +class GraphModuleDebug(graph_executor.GraphModule): + """Graph debug runtime module. + + This is a debug wrapper over the TVM runtime. + Runtime interfaces are wrapped with debug functionalities. + Manage the debug framework to format the debug data and + trigger the user interfaces. 
+ + Parameters + ---------- + module : Module + The internal tvm module that holds the actual graph functions. + + device : Device + The device that this module is under. + + graph_json_str : str or graph class + Content of graph json file in string format + + dump_root : str + To select which folder the outputs should be kept. + None will make a temp folder in /tmp/tvmdbg and does the dumping + """ + + def __init__(self, module, device, graph_json_str, dump_root): + self._dump_root = dump_root + self._dump_path = None + self._run_individual = module["run_individual"] + self._run_individual_node = module["run_individual_node"] + self._debug_get_output = module["debug_get_output"] + self._execute_node = module["execute_node"] + self._get_node_output = module["get_node_output"] + self._profile = module["profile"] + self._profile_rpc = module["profile_rpc"] + graph_executor.GraphModule.__init__(self, module) + self._create_debug_env(graph_json_str, device) + + def _format_device(self, device): + return str(device[0]).upper().replace("(", ":").replace(")", "") + + def _ensure_dir(self, directory): + """Create a directory if not exists + + Parameters + ---------- + + directory : str + File path to create + """ + if not os.path.exists(directory): + os.makedirs(directory, 0o700) + + def _get_dump_path(self, device): + """Make the graph and tensor dump folder and return the path. + + Parameters + ---------- + device : Device + The device that this module is under. + + Returns + ------- + path : str + Directory path where the graph and node outputs will be stored. 
+ """ + # save to file + folder_name = _DUMP_PATH_PREFIX + "device_" + folder_name = folder_name + device.replace(":", "_") + path = os.path.join(self._dump_root, folder_name) + self._ensure_dir(path) + return path + + def _remove_dump_root(self): + if os.path.isdir(self._dump_root): + shutil.rmtree(self._dump_root) + + def _create_debug_env(self, graph_json, device): + """Create UI wrapper framework to handle multiple UI frontends for tvmdbg + + Parameters + ---------- + graph_json : json format + json formatted NNVM graph contain list of each node's name, shape and type. + + nodes_list : list + List of all the nodes presented in the graph + + device : Device + The device that this module is under. + """ + # make the dump folder if not given + if not self._dump_root: + self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX) + + # format the device + device = self._format_device(device) + + # updates the dumping directories + self._dump_path = self._get_dump_path(device) + + # init the debug dumping environment + self.debug_datum = debug_result.DebugResult(graph_json, self._dump_path) + + def _execute_next_node(self, node_index, output_index): + """Execute node assuming all previous nodes has been executed. + Return the output of this node. + + Parameters + ---------- + node_index : int + The node index + output_index: int + The node output index + Return + ------ + output_tensors : Array + Array of output tensors + """ + output_tensors = self._execute_next_node_get_output(node_index, output_index) + return output_tensors + + def _run_per_layer(self): + """Execute up to each node and each debug output will be + copied to the buffer. 
+ + """ + output_tensors = [] + for i, node in enumerate(self.debug_datum.get_graph_nodes()): + self._execute_node(i) + num_outputs = self.debug_datum.get_graph_node_output_num(node) + for j in range(num_outputs): + logging.info( + "running node=%d, output_ind=%d, with node_name: %s", i, j, node["name"] + ) + output_tensors.append(self._get_node_output(i, j)) + self.debug_datum.update_output_tensors(output_tensors) + + def _run_debug( + self, + number, + repeat, + min_repeat_ms, + limit_zero_time_iterations, + cooldown_interval_ms, + repeats_to_cooldown, + ): + """Execute the node specified with index will be executed. + Each debug output will be copied to the buffer + Time consumed for each execution will be set as debug output. + """ + # Get timing. + self.debug_datum._time_list = self.run_individual( + number=number, + repeat=repeat, + min_repeat_ms=min_repeat_ms, + limit_zero_time_iterations=limit_zero_time_iterations, + cooldown_interval_ms=cooldown_interval_ms, + repeats_to_cooldown=repeats_to_cooldown, + ) + + # Get outputs. 
+ self._run_per_layer() + + def debug_get_output(self, node, out=None): + """Run graph up to node and get the output to out + + Parameters + ---------- + node : int / str + The node index or name + + out : NDArray + The output array container + """ + if isinstance(node, str): + node_index = None + for i, graph_node in enumerate(self.debug_datum.get_graph_nodes()): + if graph_node["name"] == node: + node_index = i + break + else: + raise AttributeError(f"Could not find a node named {node} in this graph.") + elif isinstance(node, int): + node_index = node + else: + raise RuntimeError("Require node index or name only.") + if out: + self._debug_get_output(node_index, out) + return out + return self._debug_get_output(node_index) + + # pylint: disable=arguments-differ + def run( + self, + number=10, + repeat=1, + min_repeat_ms=1, + limit_zero_time_iterations=100, + cooldown_interval_ms=0, + repeats_to_cooldown=1, + sort_by_time=True, + **input_dict, + ): + """Run forward execution of the graph with debug + + Parameters + ---------- + number: int, optional + The number of times to run this function for taking average. + We call these runs as one `repeat` of measurement. + + repeat: int, optional + The number of times to repeat the measurement. + In total, the function will be invoked (1 + number x repeat) times, + where the first one is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. + + min_repeat_ms: int, optional + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + + limit_zero_time_iterations: int, optional + The maximum number of repeats when measured time is equal to 0. 
+ It helps to avoid hanging during measurements. + + cooldown_interval_ms: int, optional + The cooldown interval in milliseconds between the number of repeats defined by + `repeats_to_cooldown`. + + repeats_to_cooldown: int, optional + The number of repeats before the cooldown is activated. + + sort_by_time: bool, optional + Whether to sort the debug output by time. + + input_dict : dict of str to NDArray + List of input values to be feed to + """ + if input_dict: + self.set_input(**input_dict) + + # Step 1. Execute the graph + self._run_debug( + number=number, + repeat=repeat, + min_repeat_ms=min_repeat_ms, + limit_zero_time_iterations=limit_zero_time_iterations, + cooldown_interval_ms=cooldown_interval_ms, + repeats_to_cooldown=repeats_to_cooldown, + ) + # Step 2. Dump the output tensors to the dump folder + self.debug_datum.dump_output_tensor() + # Step 3. Dump the Chrome trace to the dump folder + self.debug_datum.dump_chrome_trace() + # Step 4. Display the collected information + self.debug_datum.display_debug_result(sort_by_time) + + def run_individual( + self, + number, + repeat=1, + min_repeat_ms=0, + limit_zero_time_iterations=100, + cooldown_interval_ms=0, + repeats_to_cooldown=1, + ): + """Run each operation in the graph and get the time per op for all ops. + + number: int + The number of times to run this function for taking average. + We call these runs as one `repeat` of measurement. + + repeat: int, optional + The number of times to repeat the measurement. + In total, the function will be invoked (1 + number x repeat) times, + where the first one is warm up and will be discarded. + The returned result contains `repeat` costs, + each of which is an average of `number` costs. + + min_repeat_ms: int, optional + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. 
+ i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + + limit_zero_time_iterations: int, optional + The maximum number of repeats when measured time is equal to 0. + It helps to avoid hanging during measurements. + + cooldown_interval_ms: int, optional + The cooldown interval in milliseconds between the number of repeats defined by + `repeats_to_cooldown`. + + repeats_to_cooldown: int, optional + The number of repeats before the cooldown is activated. + + Returns + ------- + A 2-dimensional array where the dimensions are: the index of the operation and + the repeat of the measurement. + """ + res = self._run_individual( + number, + repeat, + min_repeat_ms, + limit_zero_time_iterations, + cooldown_interval_ms, + repeats_to_cooldown, + ) + results = [] + offset = 0 + format_size = "@q" + (nodes_count,) = struct.unpack_from(format_size, res, offset) + offset += struct.calcsize(format_size) + format_data = "@" + repeat * "d" + for _ in range(0, nodes_count): + ret = struct.unpack_from(format_data, res, offset) + offset += struct.calcsize(format_data) + results.append([*ret]) + return results + + def run_individual_node( + self, + index, + number=10, + repeat=1, + min_repeat_ms=0, + limit_zero_time_iterations=100, + cooldown_interval_ms=0, + repeats_to_cooldown=1, + ): + """Benchmark a single node in the serialized graph. + + This does not do any data transfers and uses arrays already on the device. + + Parameters + ---------- + index : int + The index of the node, see `self.debug_datum.get_graph_nodes` + + number: int + The number of times to run this function for taking average. + We call these runs as one `repeat` of measurement. + + repeat: int, optional + The number of times to repeat the measurement. + In total, the function will be invoked (1 + number x repeat) times, + where the first one is warm up and will be discarded. 
+ The returned result contains `repeat` costs, + each of which is an average of `number` costs. + + min_repeat_ms : int, optional + The minimum duration of one `repeat` in milliseconds. + By default, one `repeat` contains `number` runs. If this parameter is set, + the parameters `number` will be dynamically adjusted to meet the + minimum duration requirement of one `repeat`. + i.e., When the run time of one `repeat` falls below this time, the `number` parameter + will be automatically increased. + + limit_zero_time_iterations: int, optional + The maximum number of repeats when measured time is equal to 0. + It helps to avoid hanging during measurements. + + cooldown_interval_ms: int, optional + The cooldown interval in milliseconds between the number of repeats defined by + `repeats_to_cooldown`. + + repeats_to_cooldown: int, optional + The number of repeats before the cooldown is activated. + + Returns + ------- + A module BenchmarkResult + """ + # Results are returned as serialized strings which we deserialize + res = self._run_individual_node( + index, + number, + repeat, + min_repeat_ms, + limit_zero_time_iterations, + cooldown_interval_ms, + repeats_to_cooldown, + ) + fmt = "@" + ("d" * repeat) + results = struct.unpack(fmt, res) + return BenchmarkResult(list(results)) + + def profile(self, collectors=None, **input_dict): + """Run forward execution of the graph and collect overall and per-op + performance metrics. + + Parameters + ---------- + collectors : Optional[Sequence[MetricCollector]] + Extra metrics to collect. If profiling over RPC, collectors must be `None`. + + input_dict : dict of str to NDArray + List of input values to be feed to + + Return + ------ + timing_results : str + Per-operator and whole graph timing results in a table format. 
+ """ + if input_dict: + self.set_input(**input_dict) + + if self.module.type_key == "rpc": + # We cannot serialize MetricCollectors over RPC + assert collectors is None, "Profiling with collectors is not supported over RPC" + return Report.from_json(self._profile_rpc()) + return self._profile(collectors) + + def exit(self): + """Exits the dump folder and all its contents""" + self._remove_dump_root() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_result.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_result.py new file mode 100644 index 0000000000000000000000000000000000000000..45caf41e7e585e1b1772c3c20ca6a64108c62158 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_result.py @@ -0,0 +1,297 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=pointless-exception-statement, unnecessary-list-index-lookup +"""Graph debug results dumping class.""" +import collections +import json +import os + +import numpy as np +import tvm + +GRAPH_DUMP_FILE_NAME = "_tvmdbg_graph_dump.json" +CHROME_TRACE_FILE_NAME = "_tvmdbg_execution_trace.json" + +ChromeTraceEvent = collections.namedtuple("ChromeTraceEvent", ["ts", "tid", "pid", "name", "ph"]) + + +class DebugResult(object): + """Graph debug data module. + + Data dump module manage all the debug data formatting. + Output data and input graphs are formatted and dumped to file. + Frontend read these data and graph for visualization. + + Parameters + ---------- + graph_json : str + The graph to be deployed in json format output by graph compiler. Each operator (tvm_op) + in the graph will have a one to one mapping with the symbol in libmod which is used + to construct a "PackedFunc" . + + dump_path : str + Output data path is read/provided from frontend + """ + + def __init__(self, graph_json, dump_path): + self._dump_path = dump_path + self._output_tensor_list = [] + self._time_list = [] + json_obj = self._parse_graph(graph_json) + # dump the json information + self._dump_graph_json(json_obj) + + def _parse_graph(self, graph_json): + """Parse and extract the JSON graph and update the nodes, shapes and dltype. + + Parameters + ---------- + graph_json : str or graph class + The graph to be deployed in json format output by JSON graph. + """ + json_obj = json.loads(graph_json) + self._nodes_list = json_obj["nodes"] + self._shapes_list = json_obj["attrs"]["shape"] + self._dtype_list = json_obj["attrs"]["dltype"] + self._update_graph_json() + return json_obj + + def _update_graph_json(self): + """update the nodes_list with name, shape and data type, + for temporarily storing the output. 
+ """ + eid = 0 + for node in self._nodes_list: + input_list = [] + if node["op"] == "null": + node["attrs"] = {} + node["op"] = "param" + num_outputs = 1 + elif node["op"] == "tvm_op": + for input_node in node["inputs"]: + input_list.append(self._nodes_list[input_node[0]]["name"]) + node["op"] = node["attrs"]["func_name"] + num_outputs = int(node["attrs"]["num_outputs"]) + else: + raise ValueError("") + node["inputs"] = input_list + dtype = str("type: " + self._dtype_list[1][eid]) + node["attrs"].update({"T": dtype}) + node["shape"] = self._shapes_list[1][eid] + eid += num_outputs + + def _cleanup_tensors(self): + """Remove the tensor dump file (graph wont be removed)""" + for filename in os.listdir(self._dump_path): + if os.path.isfile(filename) and not filename.endswith(".json"): + os.remove(filename) + + def get_graph_nodes(self): + """Return the nodes list""" + return self._nodes_list + + def get_graph_node_shapes(self): + """Return the nodes shapes list""" + return self._shapes_list + + def get_graph_node_output_num(self, node): + """Return the number of outputs of a node""" + return 1 if node["op"] == "param" else int(node["attrs"]["num_outputs"]) + + def get_graph_node_dtypes(self): + """Return the nodes dtype list""" + return self._dtype_list + + def get_output_tensors(self): + """Get the output tensors of each operation in numpy format""" + eid = 0 + output_tensors = {} + for i, node in enumerate(self._nodes_list): + num_outputs = self.get_graph_node_output_num(node) + for j in range(num_outputs): + + # the node name is not unique, so we need a consistent + # indexing based on the list ordering in the nodes + key = f"{node['name']}____topo-index:{i}____output-num:{j}" + output_tensors[key] = self._output_tensor_list[eid] + eid += 1 + return output_tensors + + def update_output_tensors(self, tensors): + """Update output tensors list + + Parameters + ---------- + tensors : list[NDArray] + """ + if not isinstance(tensors, list): + AttributeError("tensors 
with incorrect type.") + + for output_array in tensors: + self._output_tensor_list.append(output_array) + + def dump_output_tensor(self): + """Dump the outputs to a temporary folder, the tensors are in numpy format""" + # cleanup existing tensors before dumping + self._cleanup_tensors() + output_tensors = self.get_output_tensors() + + with open(os.path.join(self._dump_path, "output_tensors.params"), "wb") as param_f: + param_f.write(save_tensors(output_tensors)) + + def dump_chrome_trace(self): + """Dump the trace to the Chrome trace.json format.""" + + def s_to_us(t): + return t * 10**6 + + starting_times = np.zeros(len(self._time_list) + 1) + starting_times[1:] = np.cumsum([np.mean(times) for times in self._time_list]) + + def node_to_events(node, times, starting_time): + return [ + ChromeTraceEvent( + ts=s_to_us(starting_time), + tid=1, + pid=1, + ph="B", + name=node["name"], + ), + ChromeTraceEvent( + # Use start + duration instead of end to ensure precise timings. + ts=s_to_us(np.mean(times) + starting_time), + tid=1, + pid=1, + ph="E", + name=node["name"], + ), + ] + + events = [ + e + for (node, times, starting_time) in zip( + self._nodes_list, self._time_list, starting_times + ) + for e in node_to_events(node, times, starting_time) + ] + result = dict(displayTimeUnit="ns", traceEvents=[e._asdict() for e in events]) + + with open(os.path.join(self._dump_path, CHROME_TRACE_FILE_NAME), "w") as trace_f: + json.dump(result, trace_f) + + def _dump_graph_json(self, graph): + """Dump json formatted graph. + + Parameters + ---------- + graph : json format + json formatted JSON graph contain list of each node's + name, shape and type. 
+ """ + graph_dump_file_name = GRAPH_DUMP_FILE_NAME + with open(os.path.join(self._dump_path, graph_dump_file_name), "w") as outfile: + json.dump(graph, outfile, indent=4, sort_keys=False) + + def get_debug_result(self, sort_by_time=True): + """Return the debugger result""" + header = [ + "Node Name", + "Ops", + "Time(us)", + "Time(%)", + "Shape", + "Inputs", + "Outputs", + "Measurements(us)", + ] + lines = [ + "---------", + "---", + "--------", + "-------", + "-----", + "------", + "-------", + "----------------", + ] + eid = 0 + data = [] + total_time = sum([np.mean(time) for time in self._time_list]) + for node, time in zip(self._nodes_list, self._time_list): + time_mean = np.mean(time) + num_outputs = self.get_graph_node_output_num(node) + for j in range(num_outputs): + op = node["op"] + if node["op"] == "param": + eid += 1 + continue + name = node["name"] + shape = str(self._output_tensor_list[eid].shape) + time_us = round(time_mean * 1e6, 3) + time_percent = round(((time_mean / total_time) * 100), 3) + inputs = str(node["attrs"]["num_inputs"]) + outputs = str(node["attrs"]["num_outputs"]) + measurements = str([round(repeat_data * 1e6, 3) for repeat_data in time]) + node_data = [name, op, time_us, time_percent, shape, inputs, outputs, measurements] + data.append(node_data) + eid += 1 + + if sort_by_time: + # Sort on the basis of execution time. Prints the most expensive ops in the start. + data = sorted(data, key=lambda x: x[2], reverse=True) + # Insert a row for total time at the end. 
+ rounded_total_time_us = round(total_time * 1e6, 3) + data.append(["Total_time", "-", rounded_total_time_us, "-", "-", "-", "-", "-", "-"]) + + fmt = "" + for i, _ in enumerate(header): + max_len = len(header[i]) + for j, _ in enumerate(data): + item_len = len(str(data[j][i])) + if item_len > max_len: + max_len = item_len + fmt = fmt + "{:<" + str(max_len + 2) + "}" + log = [fmt.format(*header)] + log.append(fmt.format(*lines)) + for row in data: + log.append(fmt.format(*row)) + return "\n".join(log) + + def display_debug_result(self, sort_by_time=True): + """Displays the debugger result""" + print(self.get_debug_result(sort_by_time)) + + +def save_tensors(params): + """Save parameter dictionary to binary bytes. + + The result binary bytes can be loaded by the + GraphModule with API "load_params". + + Parameters + ---------- + params : dict of str to NDArray + The parameter dictionary. + + Returns + ------- + param_bytes: bytearray + Serialized parameters. + """ + _save_tensors = tvm.get_global_func("tvm.relay._save_param_dict") + + return _save_tensors(params) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_runtime.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..ebd903b475709bf91a68be564fd7e5ab915c6221 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/debugger/debug_runtime.py @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Deprecated Python API for DebugExecutor.""" + +import warnings + +from . import debug_executor + + +def create(*args, **kwargs): + warnings.warn( + "This function has been moved to tvm.contrib.graph_executor and will be removed " + "in the next TVM release" + ) + return debug_executor.create(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/dlpack.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/dlpack.py new file mode 100644 index 0000000000000000000000000000000000000000..75b37cef6199ca1f6b6d05bd6d64da5e7cab2936 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/dlpack.py @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Wrapping functions to bridge frameworks with DLPack support to TVM""" +from tvm.runtime import ndarray + + +def convert_func(tvm_func, tensor_type, to_dlpack_func): + """Convert a tvm function into one that accepts a tensor from another + framework, provided the other framework supports DLPACK + + Parameters + ---------- + tvm_func: Function + Built tvm function operating on arrays + + tensor_type: Type + Type of the tensors of the target framework + + to_dlpack_func: Function + Function to convert the source tensors to DLPACK + """ + assert callable(tvm_func) + + def _wrapper(*args): + args = tuple( + ndarray.from_dlpack(to_dlpack_func(arg)) if isinstance(arg, tensor_type) else arg + for arg in args + ) + return tvm_func(*args) + + return _wrapper + + +def to_pytorch_func(tvm_func): + """Convert a tvm function into one that accepts PyTorch tensors + + Parameters + ---------- + tvm_func: Function + Built tvm function operating on arrays + + Returns + ------- + wrapped_func: Function + Wrapped tvm function that operates on PyTorch tensors + """ + # pylint: disable=import-outside-toplevel + import torch + import torch.utils.dlpack + + return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/dnnl.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/dnnl.py new file mode 100644 index 0000000000000000000000000000000000000000..a722199943301d7a3dfd801ceaabcdf5c558c6e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/dnnl.py @@ -0,0 +1,162 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to BLAS libraries.""" +import tvm +from tvm import te +from ..topi.nn.utils import get_pad_tuple + + +def matmul(lhs, rhs, transa=False, transb=False, **kwargs): + """Create an extern op that compute matrix mult of A and rhs with CrhsLAS + This function serves as an example on how to call external libraries. + + Parameters + ---------- + lhs: Tensor + The left matrix operand + rhs: Tensor + The right matrix operand + transa: bool + Whether transpose lhs + transb: bool + Whether transpose rhs + + Returns + ------- + C: Tensor + The result tensor. + """ + n = lhs.shape[1] if transa else lhs.shape[0] + m = rhs.shape[0] if transb else rhs.shape[1] + return te.extern( + (n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.dnnl.matmul", ins[0], ins[1], outs[0], transa, transb + ), + name="C", + **kwargs, + ) + + +def dnnl_conv2d( + src, + weights, + stride, + padding, + dilation, + groups, + channel_last=False, + out_dtype="float32", + **kwargs, +): + """Convolution operator in NCHW layout. 
+ + Parameters + ---------- + src : tvm.te.Tensor + 4-D with shape [batch, in_channel, in_height, in_width] + + weights : tvm.te.Tensor + 4-D with shape [num_filter, in_channel, filter_height, filter_width] + + stride : int or a list/tuple of two ints + Stride size, or [stride_height, stride_width] + + padding : int or a list/tuple of 2 or 4 ints + padding size, or + [pad_height, pad_width] for 2 ints, or + [pad_top, pad_left, pad_bottom, pad_right] for 4 ints + + dilation: int or a list/tuple of two ints + dilation size, or [dilation_height, dilation_width] + + groups: str + input data layout: NCHW or NHWC + + channel_last: bool + chose if input/output data format is in channel_last format(NHWC) or + in plain format(NCHW) + + out_dtype: str + output datatype: now only support float32 + + Returns + ------- + Output : tvm.te.Tensor + 4-D with shape [batch, out_channel, out_height, out_width] + """ + + assert isinstance(stride, int) or len(stride) == 2 + assert isinstance(dilation, int) or len(dilation) == 2 + if isinstance(stride, int): + stride_h = stride_w = stride + else: + stride_h, stride_w = stride + + if isinstance(dilation, int): + dilation_h = dilation_w = dilation + else: + dilation_h, dilation_w = dilation + + pre_cast = src.dtype == "float32" + post_cast = out_dtype == "float32" + + if channel_last: + batch, in_height, in_width, _ = src.shape + kernel_h, kernel_w, _, num_filter = weights.shape + else: + batch, _, in_height, in_width = src.shape + num_filter, _, kernel_h, kernel_w = weights.shape + + dilated_kernel_h = (kernel_h - 1) * dilation_h + 1 + dilated_kernel_w = (kernel_w - 1) * dilation_w + 1 + pad_top, pad_left, pad_down, pad_right = get_pad_tuple( + padding, (dilated_kernel_h, dilated_kernel_w) + ) + out_channel = num_filter + out_height = (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1 + out_width = (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1 + + if channel_last: + out_shape = (batch, out_height, 
out_width, out_channel) + else: + out_shape = (batch, out_channel, out_height, out_width) + + return te.extern( + out_shape, + [src, weights], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.dnnl.conv2d", + ins[0], + ins[1], + outs[0], + pad_top, + pad_down, + pad_left, + pad_right, + stride[0], + stride[1], + groups, + channel_last, + pre_cast, + post_cast, + ), + name="C", + dtype=out_dtype, + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/download.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/download.py new file mode 100644 index 0000000000000000000000000000000000000000..dce9c50a787eafc608f7165d488c1baf486668fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/download.py @@ -0,0 +1,176 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=missing-timeout +"""Helper utility for downloading""" + +import logging +import os +from pathlib import Path +import shutil +import tempfile +import time + +LOG = logging.getLogger("download") + + +def download(url, path, overwrite=False, size_compare=False, retries=3): + """Downloads the file from the internet. 
+ Set the input options correctly to overwrite or do the size comparison + + Parameters + ---------- + url : str + Download url. + + path : str + Local file path to save downloaded file. + + overwrite : bool, optional + Whether to overwrite existing file, defaults to False. + + size_compare : bool, optional + Whether to do size compare to check downloaded file, defaults + to False + + retries: int, optional + Number of time to retry download, defaults to 3. + + """ + # pylint: disable=import-outside-toplevel + import urllib.request as urllib2 + + path = Path(path).resolve() + if path.exists() and path.is_file() and not overwrite: + if size_compare: + import requests + + file_size = path.stat().st_size + res_head = requests.head(url) + res_get = requests.get(url, stream=True) + if "Content-Length" not in res_head.headers: + res_get = urllib2.urlopen(url) + url_file_size = int(res_get.headers["Content-Length"]) + if url_file_size != file_size: + LOG.warning("Existing file %s has incorrect size, downloading fresh copy", path) + download(url, path, overwrite=True, size_compare=False, retries=retries) + return + + LOG.info("File %s exists, skipping.", path) + return + + LOG.info("Downloading from url %s to %s", url, path) + + # Stateful start time + start_time = time.time() + dirpath = path.parent + dirpath.mkdir(parents=True, exist_ok=True) + + def _download_progress(count, block_size, total_size): + # pylint: disable=unused-argument + """Show the download progress.""" + if count == 0: + return + duration = time.time() - start_time + progress_bytes = int(count * block_size) + progress_megabytes = progress_bytes / (1024.0 * 1024) + speed_kbps = int(progress_bytes / (1024 * duration)) + percent = min(int(count * block_size * 100 / total_size), 100) + + # Temporarily suppress newlines on the output stream. 
+ prev_terminator = logging.StreamHandler.terminator + logging.StreamHandler.terminator = "" + LOG.debug( + "\r...%d%%, %.2f MB, %d KB/s, %d seconds passed", + percent, + progress_megabytes, + speed_kbps, + duration, + ) + logging.StreamHandler.terminator = prev_terminator + + with tempfile.TemporaryDirectory() as tempdir: + tempdir = Path(tempdir) + download_loc = tempdir.joinpath(path.name) + + for i_retry in range(retries): + # pylint: disable=broad-except + try: + + urllib2.urlretrieve(url, download_loc, reporthook=_download_progress) + LOG.debug("") + try: + download_loc.rename(path) + except OSError: + # Prefer a move, but if the tempdir and final + # location are in different drives, fall back to a + # copy. + shutil.copy2(download_loc, path) + return + + except Exception as err: + if i_retry == retries - 1: + raise err + + LOG.warning( + "%s\nDownload attempt %d/%d failed, retrying.", repr(err), i_retry, retries + ) + + +if "TEST_DATA_ROOT_PATH" in os.environ: + TEST_DATA_ROOT_PATH = Path(os.environ.get("TEST_DATA_ROOT_PATH")) +else: + TEST_DATA_ROOT_PATH = Path(Path("~").expanduser(), ".tvm_test_data") +TEST_DATA_ROOT_PATH.mkdir(parents=True, exist_ok=True) + + +def download_testdata(url, relpath, module=None, overwrite=False): + """Downloads the test data from the internet. + + Parameters + ---------- + url : str + Download url. + + relpath : str + Relative file path. + + module : Union[str, list, tuple], optional + Subdirectory paths under test data folder. + + overwrite : bool, defaults to False + If True, will download a fresh copy of the file regardless of + the cache. If False, will only download the file if a cached + version is missing. 
+ + Returns + ------- + abspath : str + Absolute file path of downloaded file + + """ + global TEST_DATA_ROOT_PATH + if module is None: + module_path = "" + elif isinstance(module, str): + module_path = module + elif isinstance(module, (list, tuple)): + module_path = Path(*module) + else: + raise ValueError("Unsupported module: " + module) + abspath = Path(TEST_DATA_ROOT_PATH, module_path, relpath) + download(url, abspath, overwrite=overwrite, size_compare=False) + return str(abspath) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/emcc.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/emcc.py new file mode 100644 index 0000000000000000000000000000000000000000..3beb096b6747de16c4c9b2fe2c7625e016623516 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/emcc.py @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Util to invoke emscripten compilers in the system.""" +# pylint: disable=invalid-name +import os +import subprocess +from pathlib import Path + +from tvm._ffi.base import py_str +from tvm._ffi.libinfo import find_lib_path + + +def create_tvmjs_wasm(output, objects, options=None, cc="emcc", libs=None): + """Create wasm that is supposed to run with the tvmjs. + + Parameters + ---------- + output : str + The target shared library. + + objects : list + List of object files. + + options : str + The additional options. + + cc : str, optional + The compile string. + + libs : list + List of user-defined library files (e.g. .bc files) to add into the wasm. + """ + cmd = [cc] + cmd += ["-O3"] + cmd += ["-std=c++17"] + cmd += ["--no-entry"] + # NOTE: asynctify conflicts with wasm-exception + # so we temp disable exception handling for now + # + # We also expect user to explicitly pass in + # -s ASYNCIFY=1 as it can increase wasm size by 2xq + # + # cmd += ["-s", "ASYNCIFY=1"] + # cmd += ["-fwasm-exceptions"] + cmd += ["-s", "WASM_BIGINT=1"] + cmd += ["-s", "ERROR_ON_UNDEFINED_SYMBOLS=0"] + cmd += ["-s", "STANDALONE_WASM=1"] + cmd += ["-s", "ALLOW_MEMORY_GROWTH=1"] + cmd += ["-s", "TOTAL_MEMORY=160MB"] + + objects = [objects] if isinstance(objects, str) else objects + + with_runtime = False + for obj in objects: + if obj.find("wasm_runtime.bc") != -1: + with_runtime = True + + all_libs = [] + if not with_runtime: + all_libs += [find_lib_path("wasm_runtime.bc")[0]] + + all_libs += [find_lib_path("tvmjs_support.bc")[0]] + all_libs += [find_lib_path("webgpu_runtime.bc")[0]] + + if libs: + if not isinstance(libs, list): + raise ValueError("Expect `libs` to be a list of paths in string.") + for lib in libs: + if not Path(lib).exists(): + raise RuntimeError( + "Cannot find file from libs:" + lib + "\n Try pass in an absolute path." 
+ ) + all_libs += libs + + cmd += ["-o", output] + + # let libraries go before normal object + cmd += all_libs + objects + + if options: + cmd += options + + is_windows = os.name == "nt" + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=is_windows) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Compilation error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + +create_tvmjs_wasm.object_format = "bc" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac5badae57214be34ad6224877b6759bf3040b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Namespace for Arm(R) Ethos(TM)-U NPU contrib functionality""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..783f76de1de67295a69e73d8c7fd742424d69772 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1d608c04ff6e5feb6f3363e6a3cd8b350f51c92f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/__init__.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""The NPU cascader. + +This component performs inter-operator scheduling to optimize +for both performance and memory usage on Arm(R) Ethos(TM)-U NPUs. 
+""" +from .stripe_config import StripeConfig +from .block_config import BlockConfig +from .propagator import Propagator +from .graph import ( + PerformanceInfo, + Tensor, + Part, + TESubgraph, + CascaderGraph, + BufferMode, + register_matcher, + create_cascader_graph, +) +from .parts import InlinePart, EthosuPart +from .device_config import EthosuDeviceConfig +from .tensor_config import TensorConfigState, MemoryRegion, TensorConfig +from .plan import Plan +from .scheduler import apply_proposal, cascade, extract_memory_info +from .logging import Logging +from .cascader_options import CascaderOptions diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/_ffi_api.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/_ffi_api.py new file mode 100644 index 0000000000000000000000000000000000000000..9f098ad3df74a01aaee9906f5b47c917622e8b79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/_ffi_api.py @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""FFI APIs for the NPU cascader.""" +import tvm._ffi + + +tvm._ffi._init_api("contrib.ethosu.cascader", __name__) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/block_config.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/block_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b90de753f67927bf7063208d7670c67e5797025e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/block_config.py @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Block config to hold an output block shape and a corresponding input block shape""" +from typing import List +import tvm._ffi + +from tvm.runtime import Object + +from . 
import _ffi_api + + +@tvm._ffi.register_object("contrib.ethosu.cascader.BlockConfig") +class BlockConfig(Object): + """BlockConfig class""" + + def __init__( + self, + input_shape: List[int], + output_shape: List[int], + compute_cycles: int, + output_cycles: int, + ): + self.__init_handle_by_constructor__( + _ffi_api.BlockConfig, input_shape, output_shape, compute_cycles, output_cycles + ) + + @property + def input_shape(self) -> List[int]: + return list(self._input_shape) + + @property + def output_shape(self) -> List[int]: + return list(self._output_shape) + + @property + def compute_cycles(self) -> int: + return int(self._compute_cycles) + + @property + def output_cycles(self) -> int: + return int(self._output_cycles) + + def __ge__(self, other: "BlockConfig"): + if len(self.output_shape) != len(other.output_shape): + return False + + return all(a >= b for a, b in zip(self.output_shape, other.output_shape)) + + def __lt__(self, other: "BlockConfig"): + if len(self.output_shape) != len(other.output_shape): + return False + + return other >= self + + def __repr__(self) -> str: + return f"BlockConfig(output_shape={self.output_shape})" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/cascader_options.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/cascader_options.py new file mode 100644 index 0000000000000000000000000000000000000000..9d5562c44b39c6772660cdc1ffa37b3805ea83fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/cascader_options.py @@ -0,0 +1,90 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Object to hold options for the NPU cascader""" +import tvm._ffi + +from tvm.runtime import Object + +from . import _ffi_api +from .tensor_config import MemoryRegion + + +@tvm._ffi.register_object("contrib.ethosu.cascader.CascaderOptions") +class CascaderOptions(Object): + """ + A class to hold configuration options for the cascader. + + Attributes + ---------- + cascade_region : MemoryRegion + The MemoryRegion to place cascading buffers into. + max_proposals : int + The maximum number of Proposals to generate. + stripe_factors : int + How many striping factors to try per axis. + max_plan_size : int + The maximum number of Parts in a Plan. + max_open_plans : int + The maximum number of open Plans to keep after culling. + max_closed_plans : int + The maxmum number of closed Plans to keep after culling. + always_copy_size : int + The maximum size of a Tensor that will always be copied into the cascade region. + disable_pareto_plans : bool + Disable pareto culling for Plans. + disable_pareto_proposals : bool + Disable pareto culling for Proposals. + enable_multi_dimensional_striping : bool + Enable striping in multiple dimensions simultaneously. + disable_block_culling : bool + Disable culling of block configs. 
+ enable_striping : bool + A boolean option to enable striping + + """ + + def __init__( + self, + cascade_region: MemoryRegion, + max_proposals: int, + stripe_factors: int, + max_plan_size: int, + max_open_plans: int, + max_closed_plans: int, + always_copy_size: int, + disable_pareto_plans: bool = False, + disable_pareto_proposals: bool = False, + enable_multi_dimensional_striping: bool = False, + disable_block_culling: bool = True, + enable_striping: bool = False, + ): + self.__init_handle_by_constructor__( + _ffi_api.CascaderOptions, + cascade_region, + max_proposals, + stripe_factors, + max_plan_size, + max_open_plans, + max_closed_plans, + always_copy_size, + disable_pareto_plans, + disable_pareto_proposals, + enable_multi_dimensional_striping, + disable_block_culling, + enable_striping, + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/device_config.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/device_config.py new file mode 100644 index 0000000000000000000000000000000000000000..7c38c4ac4971b8127ea0aac3bad36cbef4d81142 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/device_config.py @@ -0,0 +1,895 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +# pylint: disable=too-many-nested-blocks +"""Device config class to hold information about the target hardware""" +from typing import Tuple, List, Dict, Optional +from functools import reduce + +import math +import numpy as np + +import tvm +from . import BlockConfig +from . import StripeConfig +from . import Propagator + + +def _round_up(a: int, b: int) -> int: + """Round up to a multiple of b""" + return ((a + b - 1) // b) * b + + +def _round_up_div(a: int, b: int) -> int: + """Divide by b and round up to a multiple of b""" + return (a + b - 1) // b + + +class _Shape: + """Helper class for dealing with Tensor shapes of different layouts""" + + def __init__(self, shape: List[int], layout="NHWC"): + if layout == "NHCWB16": + self.height = int(shape[1]) + self.width = int(shape[3]) + self.depth = int(shape[2]) * int(shape[4]) + else: + # identity layout is NHWC but the shape is not always 4 + length = len(shape) + if length == 4: + self.height = int(shape[1]) + self.width = int(shape[2]) + self.depth = int(shape[3]) + elif length == 3: + self.height = int(shape[0]) + self.width = int(shape[1]) + self.depth = int(shape[2]) + elif length == 2: + self.height = int(shape[0]) + self.width = int(shape[1]) + self.depth = 1 + elif length == 1: + self.height = int(shape[0]) + self.width = 1 + self.depth = 1 + + def round_up(self, other: "_Shape"): + self.height = _round_up(self.height, other.height) + self.width = _round_up(self.width, other.width) + self.depth = _round_up(self.depth, other.depth) + + def area(self) -> int: + return self.height * self.width + + def as_list(self): + return [1, self.height, self.width, self.depth] + + +class EthosuDeviceConfig: + """Arm(R) Ethos(TM)-U NPU config class""" + + def __init__(self, device: str, disable_block_bulling: bool = False): + self._device = device + self._subkernel_limits = (8, 8) + 
self._output_cycles = (1, 2, 3, 4, 6) + self._split_depth = 16 + self._max_block_shape = _Shape([1, 32, 64, 128]) + self._bank_size_bytes = 1024 + self._disable_block_culling = disable_block_bulling + if self._device == "ethos-u55-256": + self._micro_block = _Shape([1, 2, 2, 8]) + self._input_micro_block = _Shape([1, 2, 2, 8]) + self._delay_cycles = (2, 2) + self._activation_cycles = (0.25, 1) + self._output_units = 8 + + self._total_banks = 48 + self._reserved_banks = 4 + self._input_granularity = {1: 8, 2: 8, 4: 16} + self._accumulator_granularity = {4: 16, 5: 20} + self._lut_reserved = True + elif self._device == "ethos-u55-128": + self._micro_block = _Shape([1, 1, 2, 8]) + self._input_micro_block = _Shape([1, 1, 2, 8]) + self._delay_cycles = (2, 3) + self._activation_cycles = (0.5, 1) + self._output_units = 4 + + self._total_banks = 24 + self._reserved_banks = 4 + self._input_granularity = {1: 4, 2: 4, 4: 8} + self._accumulator_granularity = {4: 8, 5: 12} + self._lut_reserved = True + elif self._device == "ethos-u55-64": + self._micro_block = _Shape([1, 1, 1, 8]) + self._input_micro_block = _Shape([1, 1, 1, 8]) + self._delay_cycles = (2, 3) + self._activation_cycles = (1, 1) + self._output_units = 2 + + self._total_banks = 16 + self._reserved_banks = 2 + self._input_granularity = {1: 2, 2: 2, 4: 4} + self._accumulator_granularity = {4: 4, 5: 8} + self._lut_reserved = False + elif self._device == "ethos-u55-32": + self._micro_block = _Shape([1, 1, 1, 4]) + self._input_micro_block = _Shape([1, 1, 1, 8]) + self._delay_cycles = (3, 7) + self._activation_cycles = (1, 2) + self._output_units = 1 + + self._total_banks = 16 + self._reserved_banks = 2 + self._input_granularity = {1: 2, 2: 2, 4: 4} + self._accumulator_granularity = {4: 4, 5: 4} + self._lut_reserved = False + + def _get_output_cycles( + self, op_type: str, op_str: str, ifm_dtype: str, ofm_dtype: str, activation: str + ) -> float: + """Estimate cycles per output element for an NPU operator + + Parameters + 
---------- + op_type : str + The NPU primitive operator + "ethosu_pooling" + op_str : str + The type of NPU operator. + "MAX" + ifm_dtype: str + Datatype of the Input Feature Map tensor (IFM) + ofm_dtype: str + Datatype of the Output Feature Map tensor (OFM) + activation : str + The activation function to use. + "NONE" - no activation function. + "CLIP" - clip the output between clip_min and clip_max. + "TANH" - tanh activation function. + "SIGMOID" - sigmoid activation function. + "LUT" - use a look-up table to perform the activation function. + + Returns + ------- + float + The cycles per output element + """ + cycles = 0 + bw_limit = 0 + if op_type == "ethosu_pooling" and op_str == "MAX": + cycles = self._output_cycles[0] + elif op_type in ("ethosu_pooling", "ethosu_conv2d", "ethosu_depthwise_conv2d"): + cycles = self._output_cycles[1] if ifm_dtype == "int8" else self._output_cycles[2] + elif op_type == "ethosu_binary_elementwise": + # Binary Bandwidth Limitations + if ifm_dtype == "int8": + bw_limit = 0.125 if ofm_dtype == "int8" else 0.75 + elif ifm_dtype == "int16": + bw_limit = 0.75 if ofm_dtype == "int16" else 1 + else: + bw_limit = 1.5 + + if op_str in ("MIN", "MAX"): + cycles = self._output_cycles[1] + elif op_str == "MUL": + cycles = self._output_cycles[2] + if op_str in ("ADD", "SUB"): + if ofm_dtype == "int32": + cycles = ( + self._output_cycles[2] if ifm_dtype == "int32" else self._output_cycles[3] + ) + else: + cycles = self._output_cycles[4] + + elif op_type == "ethosu_unary_elementwise": + # Unary Bandwidth Limitations + if ifm_dtype == "int16": + bw_limit = 0.25 + elif ifm_dtype == "int32": + bw_limit = 1 + + if op_str == "CLZ": + cycles = self._output_cycles[1] + elif op_str in ("SHL", "SHR"): + cycles = self._output_cycles[2] + elif op_str in ("LRELU", "ABS"): + cycles = self._output_cycles[1] + if ifm_dtype == "int16": + bw_limit = 0.5 + + act_cycles = 0 + if activation == "CLIP": + act_cycles = self._activation_cycles[0] + elif activation in 
("LUT", "TANH", "SIGMOID"): + act_cycles = self._activation_cycles[1] + + return max((cycles / self._output_units), act_cycles, bw_limit) + + def _get_delay_cycles(self, op_type: str, ifm_dtype: str) -> int: + """Get the number of delay cycles during a bubble + + Parameters + ---------- + op_type : str + The NPU primitive operator + "ethosu_pooling" + op_str : str + The type of NPU operator. + "MAX" + ifm_dtype: str + Datatype of the Input Feature Map tensor (IFM) + + Returns + ---------- + int + The amount of delay cycles + """ + if op_type in ("ethosu_conv2d", "ethosu_depthwise2d", "ethosu_pooling"): + if ifm_dtype == "int16": + return self._delay_cycles[1] + + return self._delay_cycles[0] + + return 0 + + def _get_weight_decoder_cycles(self, op_type: str) -> int: + """Get cycle estimate for weight decoding + + Parameters + ---------- + op_type: str + The NPU primitive operator + "ethosu_pooling" + + Returns + ---------- + int + Estimated cycles for weight decoding + """ + if op_type in ("ethosu_conv2d", "ethosu_depthwise2d"): + return 32 * self._micro_block.depth // 8 + + return 0 + + def get_output_quantum(self, ofm_layout: str) -> Tuple[int]: + """Get the atomic output volume + + Parameters + ---------- + ofm_layout : str + The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16". 
+ + Returns + ---------- + Tuple[int] + The atomic output volume formatted to the ofm_layout parameter + """ + if ofm_layout == "NHCWB16": + return [ + 1, + self._micro_block.height, + 1, + self._micro_block.width, + self._micro_block.depth, + ] + + return self._micro_block.as_list() + + def _align(self, x: int, n: int) -> int: + return int(math.ceil(x / n) * n) + + def _get_input_size( + self, output_size: int, kernel_stride: int, border: int, upscaling_factor: int + ) -> int: + return int(math.ceil(((output_size - 1) * kernel_stride + border)) / upscaling_factor) + + def _get_dilated_kernel_size(self, kernel_size: int, dilation: int) -> int: + return (kernel_size - 1) * dilation + 1 + + def _get_input_block( + self, + output_block: _Shape, + input_shape: _Shape, + dtype: str, + op_type: str, + partkernel: bool, + stride_h: int, + stride_w: int, + dilated_kernel_h: int, + dilated_kernel_w: int, + upscaling_factor: int, + ) -> _Shape: + height = self._get_input_size( + output_block.height, + stride_h, + min(dilated_kernel_h, self._subkernel_limits[0]), + upscaling_factor, + ) + width = self._get_input_size( + output_block.width, + stride_w, + min(dilated_kernel_w, self._subkernel_limits[1]), + upscaling_factor, + ) + + if op_type == "ethosu_conv2d": + if dtype == "int8": + if partkernel: + depth = self._align(min(32, input_shape.depth), 8) + else: + depth = self._align(min(16, input_shape.depth), 8) + elif dtype == "int16": + depth = self._align(min(16, input_shape.depth), 4) + else: + depth = self._align(min(8, input_shape.depth), 2) + else: + depth = output_block.depth + + return _Shape( + [ + 1, + self._align(height, self._micro_block.height), + self._align(width, self._micro_block.width), + depth, + ] + ) + + def get_kernel_steps( + self, + op_type: str, + dilated_kernel_h: int, + dilated_kernel_w: int, + ifm_dtype: str, + partkernel: bool = False, + ) -> List[int]: + """Calculate the total number of subkernels and their sizes + + Parameters + ---------- + 
op_type : str + The NPU primitive operator + "ethosu_pooling" + dilated_kernel_h: int + Height of dilated kernel + dilated_kernel_w: int + Width of dilated kernel + ifm_dtype: str + Datatype of the Input Feature Map tensor (IFM) + partkernel: bool + Flag showing whether part-kernel first traversal is used + + Returns + ---------- + List[int] + List where each entry contains the amount of elements in one of the subkernels + """ + if op_type == "ethosu_binary_elementwise": + return [1] + + subkernels = self._get_subkernels(dilated_kernel_h, dilated_kernel_w) + + # Determine the number of kernel steps per subkernel + kernel_steps = [] + for y, x in subkernels: + subkernel_elements = x * y + if op_type == "ethosu_conv2d" and partkernel: + # Part-kernel-first traversal conv2d + divisor = 4 if ifm_dtype == "int8" else 2 + kernel_steps.append(int(_round_up_div(subkernel_elements, divisor))) + elif op_type == "ethosu_depthwise_conv2d": + kernel_steps.append(int(_round_up_div(subkernel_elements, 4))) + else: + # Depth-first traversal conv2d or pooling + kernel_steps.append(int(subkernel_elements)) + + return kernel_steps + + def _get_subkernels(self, dilated_kernel_h: int, dilated_kernel_w: int): + num_subkernels_y = _round_up_div(dilated_kernel_h, self._subkernel_limits[0]) + num_subkernels_x = _round_up_div(dilated_kernel_w, self._subkernel_limits[1]) + subkernels_y = [ + min((dilated_kernel_h - i * self._subkernel_limits[0]), self._subkernel_limits[0]) + for i in range(num_subkernels_y) + ] + subkernels_x = [ + min((dilated_kernel_w - i * self._subkernel_limits[1]), self._subkernel_limits[1]) + for i in range(num_subkernels_x) + ] + + subkernels = [] + for y in subkernels_y: + for x in subkernels_x: + subkernels.append((y, x)) + + return subkernels + + def _get_accumulator_width(self, op_type: str, ifm_dtype: str): + if ifm_dtype == "int16" and op_type != "ethosu_pooling": + return 5 + + return 4 + + def is_partkernel( + self, op_type: str, ifm_channels: int, ifm_dtype: 
str, kernel_elements: int + ) -> bool: + """Determine which block traversal strategy has better DPU utilization + + Parameters + ---------- + op_type: str + The NPU primitive operator + "ethosu_pooling" + ifm_channels: int + Number of input channels + ifm_dtype: str + Datatype of the Input Feature Map tensor (IFM) + kernel_elements: int + Total number of elements in the kernel + + Returns + ---------- + bool + True if partkernel first has best DPU utilization + """ + if op_type != "ethosu_conv2d": + return False + + depth_first_utilization = ifm_channels / _round_up( + ifm_channels, 32 if ifm_dtype == "int8" else 16 + ) + part_kernel_first_utilization = (ifm_channels / _round_up(ifm_channels, 8)) * ( + kernel_elements / _round_up(kernel_elements, 4 if ifm_dtype == "int8" else 2) + ) + + return part_kernel_first_utilization > depth_first_utilization or ifm_channels <= 8 + + def _get_input_banks(self, input_block_shape, input_bytewidth): + input_bytes = input_block_shape.area() * self._align( + input_block_shape.depth * input_bytewidth, 8 + ) + input_banks = _round_up_div(input_bytes, self._bank_size_bytes) * 2 + input_banks = _round_up(input_banks, self._input_granularity[input_bytewidth]) + + return input_banks + + def _get_accumulator_banks(self, output_block_shape, acc_bytewidth): + acc_depth = _round_up(output_block_shape.depth, 8) + acc_bytes = output_block_shape.area() * self._align(acc_depth, 8) * acc_bytewidth + acc_banks = _round_up_div(acc_bytes, self._bank_size_bytes) * 2 + acc_banks = _round_up(acc_banks, self._accumulator_granularity[acc_bytewidth]) + + return acc_banks + + @staticmethod + def _create_layout_block(nhwc_block_config, layout): + """A helper function to convert to brick layout""" + if layout == "NHCWB16": + return [ + nhwc_block_config[0], + nhwc_block_config[1], + 1 + ((nhwc_block_config[3] - 1) // 16), + nhwc_block_config[2], + 16, + ] + # else it could only be NHWC + return nhwc_block_config + + def get_elementwise_block_config( + self, 
+ ifm_propagator: Propagator, + ifm2_propagator: Optional[Propagator], + op_attrs: Dict, + ofm_shape: List[int], + output_layout: str, + input_layout: str, + input2_layout: Optional[str], + ifm_dtype: str, + ofm_dtype: str, + ) -> List[BlockConfig]: + """Get a suitable block config for an elementwise operator + + Parameters + ---------- + ifm_propagator: Propagator, + The propagator containing the data dependencies between input and output + ifm2_propagator: Propagator, + The propagator containing the data dependencies between input2 and output + op_attrs: Dict, + Dictionary containing operator attributes + ofm_shape: List[int], + Shape of the output tensor + output_layout: str, + The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16". + input_layout: str, + The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16". + input2_layout: str, + The layout of the Input2 Feature Map tensor. Can be "NHWC" or "NHCWB16". + ifm_dtype: str, + Datatype of the Input Feature Map tensor (IFM) + ofm_dtype: str, + Datatype of the Output Feature Map tensor (OFM) + + Returns + ---------- + List[BlockConfig] + List containing a single suitable block config + """ + block_config = [] + output_shape = [int(a) for a in ofm_shape] + + op_type = op_attrs.get("op") + op_str = op_attrs.get("op_str") + activation = op_attrs.get("activation", "NONE") + + input_bytewidth = 1 if ifm_dtype == "int8" else 2 if ifm_dtype == "int16" else 4 + banks_available = self._total_banks - self._reserved_banks + if activation == "LUT" and not self._lut_reserved: + banks_available -= 2 + + # Handle user-forced block config + options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None) + if options and options.dev_force_block_config: + block_config = [int(v) for v in options.dev_force_block_config.split("x")] + assert len(block_config) == 3 + if output_layout == "NHWC": + block_shape = [output_shape[0], block_config[0], block_config[1], block_config[2]] 
+ else: + block_shape = [ + output_shape[0], + block_config[0], + 1 + ((block_config[2] - 1) // 16), + block_config[1], + 16, + ] + output_cycles = self._get_output_cycles( + op_type, op_str, ifm_dtype, ofm_dtype, activation + ) + output_cycles *= reduce(lambda a, b: a * b, block_shape, 1) + output_cycles = int(math.ceil(output_cycles)) + return [BlockConfig(block_shape, block_shape, 0, output_cycles)] + + # Split the block in half until it fits into SHRAM + max_height, max_width, max_depth = self._max_block_shape.as_list()[1:] + if output_layout == "NHCWB16": + output_height = output_shape[1] + output_width = output_shape[3] + output_channels = output_shape[2] * 16 + else: + output_height = output_shape[1] + output_width = output_shape[2] + output_channels = output_shape[3] + + output_nhwc_block = [ + 1, + _round_up(min(output_height, max_height), self._micro_block.height), + _round_up(min(output_width, max_width), self._micro_block.width), + _round_up(min(output_channels, max_depth), self._micro_block.depth), + ] + output_block = self._create_layout_block(output_nhwc_block, output_layout) + split_order = (a for a in [1, 2, 3]) + split_axis = next(split_order) + + offset = [0] * len(output_block) + stripes = [1] * len(output_block) + order = [1, 2, 4, 3, 0] if output_layout == "NHCWB16" else [1, 2, 3, 4] + while True: + # Create stripe config for output block + output_stripe_config = StripeConfig( + output_block, output_block, output_block, order, stripes, offset + ) + + # Propagate the output to obtain the two input blocks + input_block = _Shape(ifm_propagator.propagate(output_stripe_config).shape, input_layout) + if ifm2_propagator: + input2_block = _Shape( + ifm2_propagator.propagate(output_stripe_config).shape, input2_layout + ) + else: + # Unary elementwise + input2_block = input_block + + input_block.round_up(self._input_micro_block) + input2_block.round_up(self._input_micro_block) + + # Banks required for input block + input_banks = 
self._get_input_banks(input_block, input_bytewidth) + # Banks required for input2 block + input2_banks = self._get_input_banks(input2_block, input_bytewidth) + + # Check whether or not both IFMs fit into SHRAM + if (input_banks + input2_banks) <= banks_available: + output_cycles = self._get_output_cycles( + op_type, op_str, ifm_dtype, ofm_dtype, activation + ) + output_cycles *= reduce(lambda a, b: a * b, output_block, 1) + output_cycles = int(math.ceil(output_cycles)) + block_config.append( + BlockConfig(input_block.as_list(), output_block, 0, output_cycles) + ) + break + + if output_nhwc_block[split_axis] == self._micro_block.as_list()[split_axis]: + split_axis = next(split_order) + + output_nhwc_block[split_axis] = _round_up( + _round_up_div(output_nhwc_block[split_axis], 2), + self._micro_block.as_list()[split_axis], + ) + output_block = self._create_layout_block(output_nhwc_block, output_layout) + + return block_config + + def _get_subkernel_propagator( + self, op_attrs, ifm_propagator, input_layout, output_layout, depth + ): + op_type = op_attrs.get("op") + stride_h = int(op_attrs.get("stride_h", 1)) + stride_w = int(op_attrs.get("stride_w", 1)) + transform = ifm_propagator.transform + + if op_type != "ethosu_identity": + if input_layout == "NHCWB16": + transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h) + transform[3][-1] = min(transform[3][-1], self._subkernel_limits[1] - stride_w) + else: + transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h) + transform[2][-1] = min(transform[2][-1], self._subkernel_limits[1] - stride_w) + + if op_type in ("ethosu_pooling", "ethosu_depthwise_conv2d"): + if output_layout == "NHCWB16" and input_layout == "NHWC": + transform[3][-1] = depth + elif output_layout == "NHCWB16" and input_layout == "NHCWB16": + transform[2][-1] = 1 + ((depth - 1) // 16) + + return Propagator(transform, ifm_propagator.offset) + + def get_valid_block_configs( + self, + ifm_propagator: Propagator, 
+ op_attrs: Dict, + ofm_shape: List[int], + ofm_channels: int, + ifm_channels: int, + output_layout: str, + input_layout: str, + ifm_dtype: str, + ofm_dtype: str, + kernel_h: int = 1, + kernel_w: int = 1, + ) -> List[BlockConfig]: + """Get all of the valid block configs + + Parameters + ---------- + ifm_propagator: Propagator, + The propagator containing the data dependencies between input and output + op_attrs: Dict, + Dictionary containing operator attributes + ofm_shape: List[int], + Shape of the output tensor + ofm_channels: int, + Number of output channels + ifm_channels: int, + Number of input channels + output_layout: str, + The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16". + input_layout: str, + The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16". + ifm_dtype: str, + Datatype of the Input Feature Map tensor (IFM) + ofm_dtype: str, + Datatype of the Output Feature Map tensor (OFM) + kernel_h: int, + Height of kernel + kernel_h: int + Width of kernel + + Returns + ---------- + List[BlockConfig] + List containing all of the valid block configs + """ + valid_block_configs = [] + + op_type = op_attrs.get("op") + op_str = op_attrs.get("op_str") + activation = op_attrs.get("activation", "NONE") + upscaling_factor = 1 if op_attrs.get("upscale", "NONE") == "NONE" else 2 + + if output_layout == "NHCWB16": + output_shape = _Shape([1, ofm_shape[1], ofm_shape[3], ofm_channels]) + else: + output_shape = _Shape(ofm_shape) + + # Define search space + max_height = min(output_shape.height, self._max_block_shape.height) + min_height = max(self._micro_block.height, upscaling_factor) + + max_width = min(output_shape.width, self._max_block_shape.width) + min_width = max(self._micro_block.width, upscaling_factor) + + max_depth = min(ofm_channels, self._max_block_shape.depth) + min_depth = max(self._micro_block.depth, upscaling_factor) + + heights = range(min_height, max_height + min_height, min_height) + widths = range(min_width, 
max_width + min_width, min_width) + depths = range(min_depth, max_depth + min_depth, min_depth) + + # Handle user-forced block config + options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None) + forced = False + if options and options.dev_force_block_config: + block_config = [int(v) for v in options.dev_force_block_config.split("x")] + assert len(block_config) == 3 + heights = [block_config[0]] + widths = [block_config[1]] + depths = [block_config[2]] + forced = True + + input_bytewidth = 1 if ifm_dtype == "int8" else 2 + acc_bytewidth = self._get_accumulator_width(op_type, ifm_dtype) + banks_available = self._total_banks - self._reserved_banks + if activation == "LUT" and not self._lut_reserved: + banks_available -= 2 + + # Input block depth has additional limitations for operators that require full input depth + input_block_depth = 0 + partkernel = self.is_partkernel(op_type, ifm_channels, ifm_dtype, kernel_h * kernel_w) + if op_type == "ethosu_conv2d": + if partkernel: + input_block_depth = min(ifm_channels, 16) + else: + input_block_depth = min(ifm_channels, 32) + + for depth in reversed(depths): + if (depth < output_shape.depth) and (depth % self._split_depth != 0) and not forced: + # Block depth has to be less than full depth or a multiple of the split depth + continue + + subkernel_propagator = self._get_subkernel_propagator( + op_attrs, ifm_propagator, input_layout, output_layout, depth + ) + + for width in reversed(widths): + for height in reversed(heights): + if output_layout == "NHCWB16": + output_block = ( + 1, + height, + 1 + ((depth - 1) // 16), + width, + 16, + ) + order = [1, 2, 4, 3, 0] + else: + output_block = (1, height, width, depth) + order = [1, 2, 3, 4] + + offset = [0] * len(output_block) + stripes = [1] * len(output_block) + block_stripe_config = StripeConfig( + output_block, + output_block, + output_block, + order, + stripes, + offset, + ) + + # Propagate output block + input_block = 
subkernel_propagator.propagate(block_stripe_config) + + input_block_shape = _Shape(input_block.shape, input_layout) + input_block_shape.round_up(self._input_micro_block) + + output_block_shape = _Shape(output_block, output_layout) + + if op_type == "ethosu_conv2d": + input_block_shape.depth = input_block_depth + + # Banks required for input block + input_banks = self._get_input_banks(input_block_shape, input_bytewidth) + # Banks required for accumulation + acc_banks = self._get_accumulator_banks(output_block_shape, acc_bytewidth) + + if (input_banks + acc_banks) <= banks_available: + output_cycles = self._get_output_cycles( + op_type, op_str, ifm_dtype, ofm_dtype, activation + ) + output_cycles *= np.prod(output_block).tolist() + output_cycles = int(math.ceil(output_cycles)) + compute_cycles = self._estimate_compute_cycles_per_block( + op_type, + output_block_shape, + input_block_shape, + kernel_h, + kernel_w, + ifm_channels, + "int8", + partkernel, + ) + block_config = BlockConfig( + input_block_shape.as_list(), output_block, compute_cycles, output_cycles + ) + + if self._disable_block_culling: + # Block culling disabled - add all block configs that fit + valid_block_configs.append(block_config) + else: + # Add block config only if it's not dominated by an existing block. + # A block config is dominated by another if its output_shape is greater + # or equal in every dimension and strictly greater in at least one + # dimension. 
+ dominated = False + for valid_block in valid_block_configs: + if block_config < valid_block: + dominated = True + break + + if not dominated: + valid_block_configs.append(block_config) + + # Every consecutive block in the innermost loop will be dominated by + # this one so break + break + + return valid_block_configs + + def _estimate_compute_cycles_per_block( + self, + op_type: str, + block_shape: _Shape, + input_block_shape: _Shape, + kernel_h: int, + kernel_w: int, + input_channels: int, + ifm_dtype: str, + partkernel: bool = False, + ) -> Tuple[int, int]: + # Calculate the amount of micro blocks per block, per axis + num_quantum_x = _round_up_div(block_shape.width, self._micro_block.width) + num_quantum_y = _round_up_div(block_shape.height, self._micro_block.height) + num_quantum_z = _round_up_div(block_shape.depth, self._micro_block.depth) + num_quantum_xy = num_quantum_x * num_quantum_y + + kernel_steps = self.get_kernel_steps(op_type, kernel_h, kernel_w, ifm_dtype, partkernel) + + wd_cycles = self._get_weight_decoder_cycles(op_type) + delay_cycles = self._get_delay_cycles(op_type, ifm_dtype) + cycle_quantum = 4 + + compute_cycles = 0 + for subkernel_steps in kernel_steps: + subkernel_cycles = 1 if op_type == "ethosu_pooling" else subkernel_steps + compute_cycles += ( + max(wd_cycles, cycle_quantum * num_quantum_xy) * subkernel_cycles * num_quantum_z + ) + + if num_quantum_xy == 1: + if num_quantum_z == 1: + compute_cycles += delay_cycles * subkernel_steps + elif subkernel_steps > 1: + compute_cycles += delay_cycles * (subkernel_steps - 1) * num_quantum_z + + if partkernel: + compute_cycles *= _round_up_div(input_block_shape.depth, 8) + + if op_type == "ethosu_conv2d": + compute_cycles *= _round_up_div(input_channels, input_block_shape.depth) + + return compute_cycles diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/graph.py 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..ca0d8fef9e16d62d61efad18ad5a4fa39fe9392d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/graph.py @@ -0,0 +1,268 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Graph objects to define compute graphs for the NPU cascader.""" +from typing import List, Dict +from enum import IntEnum +from collections import namedtuple +import numpy as np + +import tvm._ffi +from tvm import te +from tvm.runtime import Object + +from .stripe_config import StripeConfig +from .device_config import EthosuDeviceConfig +from . 
import _ffi_api + + +# A global store to register matching functions +REGISTERED_MATCHERS = [] + + +TESubgraph = namedtuple("TESubgraph", ["input_tensors", "output_tensor"]) + + +class BufferMode(IntEnum): + RECOMPUTE = 0 + ROLLING = 1 + + +@tvm._ffi.register_object("contrib.ethosu.cascader.PerformanceInfo") +class PerformanceInfo(Object): + """PerformanceInfo class""" + + @property + def compute_cycles(self): + return self._compute_cycles + + @property + def read_bytes(self): + return list(self._read_bytes) + + @property + def write_bytes(self): + return self._write_bytes + + @property + def block_config(self): + return self._block_config + + +@tvm._ffi.register_object("contrib.ethosu.cascader.Tensor") +class Tensor(Object): + """Tensor class""" + + def __init__(self, shape, dtype, is_constant=False, compression_ratio=1): + self.__init_handle_by_constructor__( + _ffi_api.Tensor, shape, dtype, is_constant, compression_ratio + ) + + def add_producer(self, part): + _ffi_api.TensorAddProducer(self, part) + + def add_consumer(self, part): + _ffi_api.TensorAddConsumer(self, part) + + @property + def producers(self): + return list(self._producers) + + @property + def consumers(self): + return list(self._consumers) + + @property + def shape(self): + return list(self._shape) + + @property + def dtype(self): + return self._dtype + + @property + def is_constant(self): + return self._is_constant + + @property + def compression_ratio(self): + return self._compression_ratio + + @property + def size(self): + return self._size + + +class Part(Object): + """Part base class""" + + def set_input(self, index: int, tensor: Tensor): + _ffi_api.PartSetInput(self, index, tensor) + + def set_output(self, tensor: Tensor): + _ffi_api.PartSetOutput(self, tensor) + + def calculate_input_stripe_configs( + self, output_stripe_config: StripeConfig + ) -> List[StripeConfig]: + return list(_ffi_api.PartCalculateInputStripeConfigs(self, output_stripe_config)) + + def get_stripe_align_hint(self) -> 
List[int]: + return list(_ffi_api.PartGetStripeAlignHint(self)) + + def get_performance_info( + self, stripe_config: StripeConfig, buffer_mode: BufferMode + ) -> PerformanceInfo: + return _ffi_api.PartGetPerformanceInfo(self, stripe_config, buffer_mode) + + @property + def input_tensors(self): + return list(self._input_tensors) + + @property + def output_tensor(self): + return self._output_tensor + + @property + def propagators(self): + return list(self._propagators) + + @property + def in_line(self): + return self._in_line + + @property + def subgraph(self): + return TESubgraph(list(self._te_input_tensors), self._te_output_tensor) + + +@tvm._ffi.register_object("contrib.ethosu.cascader.CascaderGraph") +class CascaderGraph(Object): + """A class to describe a graph of Parts and Tensors used by the cascader. + + This class describes a graph consisting of two object types: Tensors and Parts. + It defines a topological ordering on the graph such that each Part and Tensor has a + position in the ordering. This ordering is used by the Plan and Proposal generation + algorithms. It is also the ordering the Parts are expected to be executed in. 
+ + In addition to defining an ordering, the Parts and Tensors are also all given unique + IDs which they can be referred to by.""" + + def __init__(self, input_tensors: List[Tensor], output_tensors: List[Tensor]): + self.__init_handle_by_constructor__(_ffi_api.CascaderGraph, input_tensors, output_tensors) + + def get_part_id(self, part: Part) -> int: + return _ffi_api.CascaderGraphGetPartID(self, part) + + def get_tensor_id(self, tensor: Tensor) -> int: + return _ffi_api.CascaderGraphGetTensorID(self, tensor) + + @property + def input_tensors(self): + return list(self._input_tensors) + + @property + def output_tensors(self): + return list(self._output_tensors) + + @property + def tensor_order(self): + return list(self._tensor_order) + + @property + def part_order(self): + return list(self._part_order) + + +def register_matcher(matcher): + """Register a match function to the frontend. + + A match function takes a te.Tensor and checks whether it matches + a known operator/operator sequence. If it does, it returns a Part + which models the behaviour of that operator sequence. Otherwise, + it returns None. + """ + REGISTERED_MATCHERS.append(matcher) + return matcher + + +def create_cascader_graph( + te_graph: TESubgraph, const_dict: Dict[int, np.ndarray], device_config: EthosuDeviceConfig +) -> CascaderGraph: + """Create a CascaderGraph from a Tensor Expression graph and constant dictionary. + + Parameters + ---------- + te_graph : TESubgraph + The Tensor Expression graph. + const_dict : Dict[int, np.ndarray] + The constant dictionary. + device_config : EthosuDeviceConfig + Target device configuration. + + Returns + ------- + CascaderGraph + The CascaderGraph. 
+ """ + tensor_map = {} + + def _visit_tensor(tensor): + if tensor not in tensor_map: + is_const = False + # Logic to determine if the tensor is constant + if tensor in list(te_graph.inputs): + i = list(te_graph.inputs).index(tensor) + if i in const_dict: + is_const = True + + # TODO(@mbaret): Calculate the compression ratio + plan_tensor = Tensor( + tensor.shape, + tensor.dtype, + is_constant=is_const, + ) + tensor_map[tensor] = plan_tensor + if isinstance(tensor.op, te.PlaceholderOp) or tensor in te_graph.inputs: + return + + input_tensors = [] + # Check whether any of the registered matchers match the current tensor + for matcher in REGISTERED_MATCHERS: + part = matcher(tensor, device_config) + if part: + input_tensors = part.subgraph.input_tensors + break + + assert part is not None, f"The tensor {tensor} doesn't match any part." + part.set_output(plan_tensor) + plan_tensor.add_producer(part) + for i, input_tensor in enumerate(input_tensors): + _visit_tensor(input_tensor) + part.set_input(i, tensor_map[input_tensor]) + tensor_map[input_tensor].add_consumer(part) + + for output in te_graph.outputs: + _visit_tensor(output) + + input_tensors = [] + for t in te_graph.inputs: + # This is needed because sometimes there are orphaned constants + if t in tensor_map: + input_tensors.append(tensor_map[t]) + + output_tensors = [tensor_map[t] for t in te_graph.outputs] + return CascaderGraph(input_tensors, output_tensors) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/logging.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..0b163eb147e7bd860425db337e3fa86caa0f3bf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/logging.py @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license 
agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""A class to hold logging information about the cascader""" +from typing import Tuple +import datetime +import json +import os +import math + + +class Logging: + """Cascader logging class""" + + def __init__(self): + self.min_memory_usage = 0 + self.max_memory_usage = 0 + self.min_cycles = 0 + self.max_cycles = 0 + + self.selected_proposal_idx = -1 + self.proposals = {} + self.cascader_runtime = 0 + + def add_proposal(self, idx: int, memory_usage: int, cycles: int): + self.proposals[idx] = {"memory_usage": memory_usage, "cycles": cycles} + + def get_extreme_points(self) -> Tuple[int, int, int, int]: + min_cycles, min_mem_usage = math.inf, math.inf + max_cycles, max_mem_usage = 0, 0 + for proposal in self.proposals.values(): + min_mem_usage = min(proposal["memory_usage"], min_mem_usage) + max_mem_usage = max(proposal["memory_usage"], max_mem_usage) + min_cycles = min(proposal["cycles"], min_cycles) + max_cycles = max(proposal["cycles"], max_cycles) + + return min_mem_usage, max_mem_usage, min_cycles, max_cycles + + def dump_json(self): + min_mem_usage, max_mem_usage, min_cycles, max_cycles = self.get_extreme_points() + with open(os.getcwd() + "/cascader_log.json", "w") as json_file: + print( + json.dumps( + { + "date": f"{datetime.datetime.now()}", + "cascader_runtime": 
self.cascader_runtime, + "min_cycles": min_cycles, + "max_cycles": max_cycles, + "min_memory_usage": min_mem_usage, + "max_memory_usage": max_mem_usage, + "selected_proposal": self.selected_proposal_idx, + "proposals": self.proposals, + }, + indent=2, + ), + file=json_file, + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/pareto.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/pareto.py new file mode 100644 index 0000000000000000000000000000000000000000..545778934c2c5845a4e6713f3cd843e2c16cd898 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/pareto.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Pareto optimisation functions for the NPU cascader.""" +from typing import List + +from tvm import Object + +from . 
def _get_pareto_frontier(costs: List[List[float]]) -> List[bool]:
    """Return a boolean mask marking which cost points lie on the Pareto frontier.

    The cost values are coerced to float *in place* (mutating the caller's
    list) before being handed to the FFI.
    """
    for row in costs:
        for idx, value in enumerate(row):
            row[idx] = float(value)

    frontier = _ffi_api.GetParetoFrontier(costs)
    return [bool(flag) for flag in frontier]


def _thin_vector(vec: List[Object], max_size: int) -> List[Object]:
    """Reduce *vec* to at most *max_size* elements (delegates to the FFI)."""
    return list(_ffi_api.ThinVector(vec, max_size))


def _pareto_cull_plans(
    plans: List[Plan], max_plans: int, disable_pareto_metric: bool
) -> List[Plan]:
    """Cull *plans* down to at most *max_plans* (delegates to the FFI)."""
    return list(_ffi_api.ParetoCullPlans(plans, max_plans, disable_pareto_metric))
+"""Parts used by the NPU cascader.""" +from typing import List +import tvm._ffi + +from .propagator import Propagator +from .graph import Part, TESubgraph +from .block_config import BlockConfig +from .stripe_config import StripeConfig +from . import _ffi_api + + +@tvm._ffi.register_object("contrib.ethosu.cascader.InlinePart") +class InlinePart(Part): + """InlinePart class""" + + def __init__( + self, + te_subgraph: TESubgraph, + propagators: List[Propagator], + ): + self.__init_handle_by_constructor__( + _ffi_api.InlinePart, + te_subgraph.input_tensors, + te_subgraph.output_tensor, + propagators, + ) + + +@tvm._ffi.register_object("contrib.ethosu.cascader.EthosuPart") +class EthosuPart(Part): + """A class to describe a Part to be executed on an Arm(R) Ethos(TM)-U NPU. + + EthosuParts must be provided with an output quantum and the cycles taken to + compute an output quantum which depend on the operator the NPU is computing.""" + + def __init__( + self, + te_subgraph: TESubgraph, + propagators: List[Propagator], + output_quantum: List[int], + subkernels: int, + valid_block_configs: List[BlockConfig], + weight_tensor_idx: int = -1, + ): + self.__init_handle_by_constructor__( + _ffi_api.EthosuPart, + te_subgraph.input_tensors, + te_subgraph.output_tensor, + propagators, + output_quantum, + subkernels, + valid_block_configs, + weight_tensor_idx, + ) + + def get_block_config(self, stripe_config: StripeConfig) -> BlockConfig: + return _ffi_api.EthosuPartGetBlockConfig(self, stripe_config) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/plan.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/plan.py new file mode 100644 index 0000000000000000000000000000000000000000..f960911ca133705c60679e9e66fce7173e40475e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/plan.py @@ -0,0 +1,167 @@ +# Licensed to the Apache 
@tvm._ffi.register_object("contrib.ethosu.cascader.Plan")
class Plan(Object):
    """
    A class which describes how to schedule a subgraph of Parts together.

    A Plan takes the form of a subgraph of connected Parts (recorded in part_group) with
    TensorConfigs for all of the required Tensors (recorded in tensor_configs). This information
    can be used to produce a Tensor Expression schedule with inter-operator scheduling. A Plan is
    necessarily single-output such that all non-output Parts are 'computed_at'ed the scope of the
    output Part. This is what achieves the technique referred to as 'cascading'. A Plan also has
    an interior memory region which specifies the region of memory into which all the Plans
    intermediate buffers should be allocated.

    Additionally, a Plan contains some other information used during the Plan generation and
    selection algorithms. Both the memory and cycles required to run the Plan are accounted for so
    that Plans can be ranked and Pareto-culled on these metrics. Furthermore, the TensorConfigs
    which are 'open' is recorded indicating that these are valid points to merge with another Plan.
    A Plan can only be turned into a schedule if it has no 'open' TensorConfigs - at which point
    the Plan is said to be 'closed'.

    Attributes
    ----------
    tensor_configs : Dict[Tensor, TensorConfig]
        The TensorConfigs specified by the Plan.
    open_configs : FrozenSet[TensorConfig]
        The TensorConfigs which are 'open' meaning they are a Plan input/output but have
        'interior' state.
    output_config : TensorConfig
        The TensorConfig of the Plan's output tensor.
    part_group : FrozenSet[Part]
        The Parts which are covered by the Plan.
    interior_region : MemoryRegion
        The MemoryRegion in which to store 'interior' Plan buffers.
    memory_usage : int
        The interior memory used by the Plan in bytes.
    cycles : int
        The cycles taken to execute the Plan.

    """

    def __init__(
        self,
        tensor_configs: Dict[Tensor, TensorConfig],
        open_configs: FrozenSet[TensorConfig],
        output_config: TensorConfig,
        part_group: FrozenSet[Part],
        interior_region: MemoryRegion,
        memory_usage: int,
        cycles: int,
    ):
        # The FFI constructor takes flat lists, so collapse the dict/sets.
        self.__init_handle_by_constructor__(
            _ffi_api.Plan,
            list(tensor_configs.values()),
            list(open_configs),
            output_config,
            list(part_group),
            interior_region,
            memory_usage,
            cycles,
        )

    def merge(self, other):
        """
        Merge two Plans with share an 'open' TensorConfig.

        The current Plan is referred to as the 'upper Plan' and the other Plan as the 'lower
        Plan'. The 'open' output config of the upper Plan must be an 'open' input config of the
        lower Plan. The Tensor referenced by these configs is the Tensor on which the two Plans
        will be merged. The merge process does the following:

        The tensor config maps will be merged with TensorConfigs from the upper Plan taking
        priority. The open configs will be merged with the TensorConfigs that are being merged
        having been removed. The output config will be that of the lower Plan. The part groups
        will be merged. The interior region is necessarily the same for both the upper and lower
        Plan. The cycles and memory usage will be summed.

        Parameters
        ----------
        other : Plan
            The Plan to merge with.

        Return
        ------
        Plan
            The merged Plan.

        """
        return _ffi_api.PlanMerge(self, other)

    @property
    def tensor_configs(self):
        """The TensorConfigs specified by the Plan."""
        # Re-key the flat FFI list by each config's tensor.
        tensor_configs = {}
        for config in self._tensor_configs:
            tensor_configs[config.tensor] = config
        return tensor_configs

    @property
    def open_configs(self):
        """
        The TensorConfigs which are 'open' meaning they are a Plan input/output but have
        'interior' state.
        """
        return frozenset(self._open_configs)

    @property
    def output_config(self):
        """The TensorConfig of the Plan's output tensor."""
        return self._output_config

    @property
    def part_group(self):
        """The Parts which are covered by the Plan."""
        return frozenset(self._part_group)

    @property
    def interior_region(self):
        """The MemoryRegion in which to store 'interior' Plan buffers."""
        return self._interior_region

    @property
    def memory_usage(self):
        """The interior memory used by the Plan in bytes."""
        return self._memory_usage

    @property
    def cycles(self):
        """The cycles taken to execute the Plan."""
        return self._cycles

    def __repr__(self):
        return (
            f"Plan(tensor_configs={self.tensor_configs}, "
            f"open_configs={self.open_configs}, "
            f"output_config={self.output_config}, "
            f"part_group={self.part_group}, "
            f"interior_region={self.interior_region.name}, "
            f"memory_usage={self.memory_usage}, "
            # Fixed: the repr previously ended with a dangling ", " and never
            # closed the opening "Plan(" parenthesis.
            f"cycles={self.cycles})"
        )
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/plan_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..ed29ff4b591919428bdcaaf55abe707c1d485b61 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ethosu/cascader/plan_generator.py @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Algorithms to generate Plans for a CascaderGraph.""" +from typing import List, Dict, Tuple + +from tvm.contrib.ethosu.cascader.tensor_config import MemoryRegion, TensorConfig + +from . 
def _generate_output_stripe_configs(
    part: Part, stripe_factors: int, enable_striping: bool, multi_dimensional: bool
) -> List[StripeConfig]:
    """Return candidate output StripeConfigs for *part* (delegates to the FFI)."""
    return list(
        _ffi_api.GenerateOutputStripeConfigs(
            part, stripe_factors, enable_striping, multi_dimensional
        )
    )


def _generate_single_plans(
    part: Part,
    output_stripe_configs: List[StripeConfig],
    home_map: Dict[Tensor, List[MemoryRegion]],
    cascade_region: MemoryRegion,
) -> List[Plan]:
    """Return Plans covering only *part*, one per stripe config (delegates to the FFI)."""
    return list(_ffi_api.GenerateSinglePlans(part, output_stripe_configs, home_map, cascade_region))


def _generate_graph_plans(
    graph: CascaderGraph,
    home_map: Dict[Tensor, List[MemoryRegion]],
    options: CascaderOptions,
):
    """Generate Plans for the whole *graph* (delegates to the FFI).

    The return value is whatever the FFI call produces; it is intentionally
    not coerced to a Python list here.
    """
    return _ffi_api.GenerateGraphPlans(
        graph,
        home_map,
        options,
    )


def get_copy_cycles_hint(tensor_config: TensorConfig) -> Tuple[int, int]:
    """
    Returns a hint estimating the number of cycles for the copy
    specified by tensor_config.

    Parameters
    ----------
    tensor_config : TensorConfig
        The tensor configuration to estimate.

    Returns
    -------
    mem2mem_cycles : int
        Total estimated cycles.
    initial_mem2mem_cycles : int
        Estimated cycles for the first block.
    """
    return _ffi_api.GetCopyCyclesHint(tensor_config)
@tvm._ffi.register_object("contrib.ethosu.cascader.Propagator")
class Propagator(Object):
    """Propagator class.

    Wraps the C++ Propagator, which is constructed from a transform matrix
    and an offset vector and used to propagate StripeConfigs (see
    :meth:`propagate`).
    """

    def __init__(self, transform, offset):
        # Coerce every matrix element to float for the FFI. Fixed: the
        # original wrapped the comprehension in a redundant list() call.
        float_transform = [[float(v) for v in row] for row in transform]
        self.__init_handle_by_constructor__(_ffi_api.Propagator, float_transform, offset)

    def propagate(self, stripe_config):
        """Apply the propagator to *stripe_config* and return the result (via the FFI)."""
        return _ffi_api.PropagatorPropagate(self, stripe_config)

    @property
    def transform(self):
        """Get the transform matrix as a list of lists of plain Python values."""
        # Each FFI scalar is unwrapped via its .value attribute.
        return [[v.value for v in row] for row in self._transform]

    @property
    def offset(self):
        """Get the offset vector as a list of plain Python values."""
        return [v.value for v in self._offset]
@tvm._ffi.register_object("contrib.ethosu.cascader.Proposal")
class Proposal(Object):
    """A class which describes how to schedule a CascaderGraph as a series of disjoint Plans.

    Attributes
    ----------
    graph : CascaderGraph
        The CascaderGraph to which the Proposal applies.
    part_group : FrozenSet[Part]
        The Parts which are covered by the Proposal.
    plans : List[Plan]
        The Plans used in the Proposal.
    input_tensor_configs : Dict[Tensor, TensorConfig]
        The TensorConfigs indexed by Tensor in the Proposal which aren't produced by a Plan.
    cascade_region : MemoryRegion
        The MemoryRegion where cascading buffers should be homed.
    memory_usage : int
        The memory required to execute the Proposal in the cascading MemoryRegion.
    cycles : int
        The estimated cycles taken to execute the Proposal.

    """

    def __init__(
        self,
        graph: CascaderGraph,
        part_group: FrozenSet[Part],
        plans: List[Plan],
        input_tensor_configs: Dict[Tensor, TensorConfig],
        cascade_region: MemoryRegion,
        # NOTE(review): annotated here as Dict[MemoryRegion, int], but the
        # memory_usage property below coerces the attribute with int() and the
        # class docstring says int — confirm which shape the FFI expects.
        memory_usage: Dict[MemoryRegion, int],
        cycles: int,
    ):
        # FrozenSet is converted to a list for the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.Proposal,
            graph,
            list(part_group),
            plans,
            input_tensor_configs,
            cascade_region,
            memory_usage,
            cycles,
        )

    @property
    def graph(self) -> CascaderGraph:
        """The CascaderGraph to which the Proposal applies."""
        return self._graph

    @property
    def part_group(self) -> FrozenSet[Part]:
        """The Parts which are covered by the Proposal."""
        return frozenset(self._part_group)

    @property
    def plans(self) -> List[Plan]:
        """The Plans used in the Proposal."""
        return list(self._plans)

    @property
    def input_tensor_configs(self) -> Dict[Tensor, TensorConfig]:
        """The TensorConfigs indexed by Tensor in the Proposal which aren't produced by a Plan."""
        return dict(self._input_tensor_configs)

    @property
    def cascade_region(self) -> MemoryRegion:
        """The MemoryRegion where cascading buffers should be homed."""
        return self._cascade_region

    @property
    def memory_usage(self) -> int:
        """The memory required to execute the Proposal in the cascading MemoryRegion."""
        return int(self._memory_usage)

    @property
    def cycles(self) -> int:
        """The estimated cycles taken to execute the Proposal."""
        return int(self._cycles)
"""Minimum graph executor that executes graph containing TVM PackedFunc."""
import numpy as np
import tvm._ffi

from tvm.rpc import _ffi_api as _rpc_ffi_api
from tvm.rpc import base as rpc_base
from tvm._ffi.base import string_types
from tvm._ffi.runtime_ctypes import Device


def create(graph_json_str, libmod, device):
    """Create a runtime executor module given a graph and module.

    Parameters
    ----------
    graph_json_str : str
        The graph to be deployed in json format output by json graph.
        The graph can contain operator(tvm_op) that points to the name
        of PackedFunc in the libmod.

    libmod : tvm.runtime.Module
        The module of the corresponding function

    device : Device or list of Device
        The device to deploy the module. It can be local or remote when there
        is only one Device. Otherwise, the first device in the list will
        be used as this purpose. All device should be given for heterogeneous
        execution.

    Returns
    -------
    graph_module : GraphModule
        Runtime graph module that can be used to execute the graph.

    Note
    ----
    See also :py:class:`tvm.contrib.graph_executor.GraphModule`
    for examples to directly construct a GraphModule from an exported
    relay compiled library.
    """
    assert isinstance(graph_json_str, string_types)

    dev, num_rpc_dev, device_type_id = get_device(libmod, device)

    # If every device is remote, create the executor through the RPC session;
    # otherwise create it locally via the registered global function.
    if num_rpc_dev == len(dev):
        fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor.create")
    else:
        fcreate = tvm._ffi.get_global_func("tvm.graph_executor.create")

    return GraphModule(fcreate(graph_json_str, libmod, *device_type_id))


def get_device(libmod, device):
    """Parse and validate all the device(s).

    Parameters
    ----------
    libmod : tvm.runtime.Module
        The module of the corresponding function

    device : Device or list of Device

    Returns
    -------
    device : list of Device
    num_rpc_dev : Number of rpc devices
    device_type_id : List of device type and device id
    """

    # Normalise to a list of Device and validate each element.
    if isinstance(device, Device):
        device = [device]
    elif not isinstance(device, (list, tuple)):
        raise ValueError("dev has to be the type of Device or a list of Device")
    for cur_dev in device:
        if not isinstance(cur_dev, Device):
            raise ValueError("dev has to be the type of Device or a list of Device")

    # device_type_id[0], device_type_id[1] are used as the primary/fallback
    # device type and id. All other ones are used as device for
    # heterogeneous execution.
    num_rpc_dev = 0
    device_type_id = []
    for cur_dev in device:
        device_type = cur_dev.device_type
        # Device types above RPC_SESS_MASK encode a remote (RPC) device; the
        # module must then be an "rpc" module bound to the same session.
        if device_type >= rpc_base.RPC_SESS_MASK:
            assert libmod.type_key == "rpc"
            assert _rpc_ffi_api.SessTableIndex(libmod) == cur_dev._rpc_sess._tbl_index
            num_rpc_dev += 1
            # Strip the session mask to recover the underlying device type.
            device_type = cur_dev.device_type % rpc_base.RPC_SESS_MASK
        device_type_id.append(device_type)
        device_type_id.append(cur_dev.device_id)

    # Mixing local and remote devices is unsupported.
    if 0 < num_rpc_dev < len(device):
        raise ValueError("Either all or none of the devices should be rpc.")
    return device, num_rpc_dev, device_type_id


class GraphModule(object):
    """Wrapper runtime module.

    This is a thin wrapper of the underlying TVM module.
    you can also directly call set_input, run, and get_output
    of underlying module functions

    Parameters
    ----------
    module : tvm.runtime.Module
        The internal tvm module that holds the actual graph functions.

    Attributes
    ----------
    module : tvm.runtime.Module
        The internal tvm module that holds the actual graph functions.

    Examples
    --------

    .. code-block:: python

        import tvm
        from tvm import relay
        from tvm.contrib import graph_executor

        # build the library using graph executor
        lib = relay.build(...)
        lib.export_library("compiled_lib.so")
        # load it back as a runtime
        lib: tvm.runtime.Module = tvm.runtime.load_module("compiled_lib.so")
        # Call the library factory function for default and create
        # a new runtime.Module, wrap with graph module.
        gmod = graph_executor.GraphModule(lib["default"](dev))
        # use the graph module.
        gmod.set_input("x", data)
        gmod.run()
    """

    def __init__(self, module):
        # Look up and cache the module's packed functions once at
        # construction time so method calls avoid repeated lookups.
        self.module = module
        self._set_input = module["set_input"]

        # TODO(shingjan): The graph_executor in C doesn't have
        # set_input/output_zero_copy implemented.
        try:
            self._set_input_zero_copy = module["set_input_zero_copy"]
        except AttributeError:
            # Fallback stub: a lambda that raises when invoked (the generator
            # .throw() trick is the only way to raise inside a lambda).
            self._set_input_zero_copy = lambda *_: (_ for _ in ()).throw(
                Exception("set_input_zero_copy is not implemented for C graph executor")
            )
        try:
            self._set_output_zero_copy = module["set_output_zero_copy"]
        except AttributeError:
            self._set_output_zero_copy = lambda *_: (_ for _ in ()).throw(
                Exception("set_output_zero_copy is not implemented for C graph executor")
            )
        self._run = module["run"]
        self._get_output = module["get_output"]
        self._get_input = module["get_input"]
        self._get_num_outputs = module["get_num_outputs"]
        self._get_input_index = module["get_input_index"]
        self._get_input_info = module["get_input_info"]
        self._get_num_inputs = module["get_num_inputs"]
        self._load_params = module["load_params"]
        self._share_params = module["share_params"]

    def set_input(self, key=None, value=None, **params):
        """Set inputs to the module via kwargs

        Parameters
        ----------
        key : int or str
            The input key

        value : the input value.
            The input value

        params : dict of str to NDArray
            Additional arguments
        """
        if key is not None:
            v = self._get_input(key)
            if v is None:
                raise RuntimeError(f"Could not find '{key}' in graph's inputs")
            v.copyfrom(value)

        if params:
            # upload big arrays first to avoid memory issue in rpc mode
            keys = list(params.keys())
            keys.sort(key=lambda x: -np.prod(params[x].shape))
            for k in keys:
                # TODO(zhiics) Skip the weights for submodule in a better way.
                # We should use ConstLoaderModule for initialization and remove
                # params from set_input
                val = self._get_input(k)
                if val:
                    self._get_input(k).copyfrom(params[k])

    def set_input_zero_copy(self, key=None, value=None, **params):
        """Set inputs to the module via kwargs with zero memory copy

        Parameters
        ----------
        key : int or str
            The input key

        value : the input value in DLPack
            The input value

        params : dict of str to NDArray
            Additional arguments
        """
        if key is not None:
            self._set_input_zero_copy(key, value)

        if params:
            keys = list(params.keys())

            for k in keys:
                # TODO(zhiics) Skip the weights for submodule in a better way.
                # We should use ConstLoaderModule for initialization and remove
                # params from set_input
                val = self._get_input(k)
                if val:
                    self._set_input_zero_copy(k, params[k])

    def set_output_zero_copy(self, key, value):
        """Set outputs to the module with zero memory copy

        Parameters
        ----------
        key : int or str
            The output key

        value : the output value in DLPack
            The output value
        """
        self._set_output_zero_copy(key, value)

    def run(self, **input_dict):
        """Run forward execution of the graph

        Parameters
        ----------
        input_dict: dict of str to NDArray
            List of input values to be feed to
        """
        if input_dict:
            self.set_input(**input_dict)
        self._run()

    def get_num_outputs(self):
        """Get the number of outputs from the graph

        Returns
        -------
        count : int
            The number of outputs.
        """
        return self._get_num_outputs()

    def get_num_inputs(self):
        """Get the number of inputs to the graph

        Returns
        -------
        count : int
            The number of inputs.
        """
        return self._get_num_inputs()

    def get_input(self, index, out=None):
        """Get index-th input to out

        Parameters
        ----------
        index : int
            The input index

        out : NDArray
            The output array container
        """
        if out:
            self._get_input(index).copyto(out)
            return out

        return self._get_input(index)

    def get_input_index(self, name):
        """Get inputs index via input name.

        Parameters
        ----------
        name : str
            The input key name

        Returns
        -------
        index: int
            The input index. -1 will be returned if the given input name is not found.
        """
        return self._get_input_index(name)

    def get_input_info(self):
        """Return the 'shape' and 'dtype' dictionaries of the graph.

        .. note::
            We can't simply get the input tensors from a TVM graph
            because weight tensors are treated equivalently. Therefore, to
            find the input tensors we look at the 'arg_nodes' in the graph
            (which are either weights or inputs) and check which ones don't
            appear in the params (where the weights are stored). These nodes
            are therefore inferred to be input tensors.

        Returns
        -------
        shape_dict : Map
            Shape dictionary - {input_name: tuple}.
        dtype_dict : Map
            dtype dictionary - {input_name: dtype}.
        """
        input_info = self._get_input_info()
        assert "shape" in input_info
        shape_dict = input_info["shape"]
        assert "dtype" in input_info
        dtype_dict = input_info["dtype"]

        return shape_dict, dtype_dict

    def get_output(self, index, out=None):
        """Get index-th output to out

        Parameters
        ----------
        index : int
            The output index

        out : NDArray
            The output array container
        """
        if out:
            self._get_output(index, out)
            return out

        return self._get_output(index)

    def debug_get_output(self, node, out):
        """Run graph up to node and get the output to out

        Parameters
        ----------
        node : int / str
            The node index or name

        out : NDArray
            The output array container
        """
        raise NotImplementedError("Please use debugger.debug_executor as graph_executor instead.")

    def load_params(self, params_bytes):
        """Load parameters from serialized byte array of parameter dict.

        Parameters
        ----------
        params_bytes : bytearray
            The serialized parameter dict.
        """
        self._load_params(bytearray(params_bytes))

    def share_params(self, other, params_bytes):
        """Share parameters from pre-existing GraphExecutor instance.

        Parameters
        ----------
        other: GraphExecutor
            The parent GraphExecutor from which this instance should share
            it's parameters.
        params_bytes : bytearray
            The serialized parameter dict (used only for the parameter names).
        """
        self._share_params(other.module, bytearray(params_bytes))

    def __getitem__(self, key):
        """Get internal module function

        Parameters
        ----------
        key : str
            The key to the module.
        """
        return self.module[key]

    def benchmark(
        self,
        device,
        func_name="run",
        repeat=5,
        number=5,
        min_repeat_ms=None,
        limit_zero_time_iterations=100,
        end_to_end=False,
        cooldown_interval_ms=0,
        repeats_to_cooldown=1,
        **kwargs,
    ):
        """Calculate runtime of a function by repeatedly calling it.

        Use this function to get an accurate measurement of the runtime of a function. The function
        is run multiple times in order to account for variability in measurements, processor speed
        or other external factors. Mean, median, standard deviation, min and max runtime are all
        reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that
        synchonization and data transfer operations are not counted towards the runtime. This allows
        for fair comparison of runtimes across different functions and models. The `end_to_end` flag
        switches this behavior to include data transfer operations in the runtime.

        The benchmarking loop looks approximately like so:

        .. code-block:: python

            for r in range(repeat):
                time_start = now()
                for n in range(number):
                    func_name()
                time_end = now()
                total_times.append((time_end - time_start)/number)


        Parameters
        ----------
        func_name : str
            The function to benchmark. This is ignored if `end_to_end` is true.

        repeat : int
            Number of times to run the outer loop of the timing code (see above). The output will
            contain `repeat` number of datapoints.

        number : int
            Number of times to run the inner loop of the timing code. This inner loop is run in
            between the timer starting and stopping. In order to amortize any timing overhead,
            `number` should be increased when the runtime of the function is small (less than a 1/10
            of a millisecond).

        min_repeat_ms : Optional[int]
            If set, the inner loop will be run until it takes longer than `min_repeat_ms`
            milliseconds. This can be used to ensure that the function is run enough to get an
            accurate measurement.

        limit_zero_time_iterations : Optional[int]
            The maximum number of repeats when measured time is equal to 0.
            It helps to avoid hanging during measurements.

        end_to_end : bool
            If set, include time to transfer input tensors to the device and time to transfer
            returned tensors in the total runtime. This will give accurate timings for end to end
            workloads.

        cooldown_interval_ms: Optional[int]
            The cooldown interval in milliseconds between the number of repeats defined by
            `repeats_to_cooldown`.

        repeats_to_cooldown: Optional[int]
            The number of repeats before the cooldown is activated.

        kwargs : Dict[str, Object]
            Named arguments to the function. These are cached before running timing code, so that
            data transfer costs are not counted in the runtime.

        Returns
        -------
        timing_results : BenchmarkResult
            Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to
            access the individual runtimes (in seconds).
        """
        min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms
        if end_to_end:
            # Have to unpack kwargs into a single list
            args = []
            for k, v in kwargs.items():
                args.append(k)
                args.append(v)
            return self.module.time_evaluator(
                "run_from_inputs",
                device,
                repeat=repeat,
                number=number,
                min_repeat_ms=min_repeat_ms,
                limit_zero_time_iterations=limit_zero_time_iterations,
            )(device.device_type % rpc_base.RPC_SESS_MASK, device.device_id, *args)
        # Cache the inputs before timing so data transfer is not measured.
        if kwargs:
            self.set_input(**kwargs)
        return self.module.time_evaluator(
            func_name,
            device,
            repeat=repeat,
            number=number,
            min_repeat_ms=min_repeat_ms,
            limit_zero_time_iterations=limit_zero_time_iterations,
            cooldown_interval_ms=cooldown_interval_ms,
            repeats_to_cooldown=repeats_to_cooldown,
        )()
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Deprecated Python API for GraphExecutor.""" + +import warnings + +from . import graph_executor + + +def create(*args, **kwargs): + warnings.warn( + "This function has been moved to tvm.contrib.graph_executor and will be removed " + "in the next TVM release" + ) + return graph_executor.create(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hexagon/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hexagon/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2e4bbdd794595628c91c8c1bae2f63f53b3bdfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hexagon/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Hexagon APIs.""" + +from .tools import * +from .transform import * diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hexagon/_ci_env_check.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hexagon/_ci_env_check.py new file mode 100644 index 0000000000000000000000000000000000000000..c1c70750e86ae52413dd36f2c3be4d26cd1e568b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hexagon/_ci_env_check.py @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Hexagon environment checks for CI usage + +These may be required by either tvm.testing or +tvm.contrib.hexagon.pytest_plugin, and are separated here to avoid a +circular dependency. 
+""" + +import os + +import tvm + +ANDROID_SERIAL_NUMBER = "ANDROID_SERIAL_NUMBER" +HEXAGON_TOOLCHAIN = "HEXAGON_TOOLCHAIN" + + +def _compile_time_check(): + """Return True if compile-time support for Hexagon is present, otherwise + error string. + + Designed for use as a the ``compile_time_check`` argument to + `tvm.testing.Feature`. + """ + if ( + tvm.testing.utils._cmake_flag_enabled("USE_LLVM") + and tvm.target.codegen.llvm_version_major() < 7 + ): + return "Hexagon requires LLVM 7 or later" + + if "HEXAGON_TOOLCHAIN" not in os.environ: + return f"Missing environment variable {HEXAGON_TOOLCHAIN}." + + return True + + +def _run_time_check(): + """Return True if run-time support for Hexagon is present, otherwise + error string. + + Designed for use as a the ``run_time_check`` argument to + `tvm.testing.Feature`. + """ + if ANDROID_SERIAL_NUMBER not in os.environ: + return f"Missing environment variable {ANDROID_SERIAL_NUMBER}." + + return True diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hipcc.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hipcc.py new file mode 100644 index 0000000000000000000000000000000000000000..9e0d88bcc74356b8bd59d0760390f7191b19744a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/hipcc.py @@ -0,0 +1,113 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Utility to invoke hipcc compiler in the system""" +from __future__ import absolute_import as _abs + +import subprocess +import os +import warnings + +import tvm._ffi +from tvm.target import Target + +from . import utils +from .._ffi.base import py_str +from .rocm import get_rocm_arch, find_rocm_path + + +def compile_hip(code, target_format="hsaco", arch=None, options=None, path_target=None, verbose=False): + """Compile HIP code with hipcc. + + Parameters + ---------- + code : str + The HIP code. + + target_format : str + The target format of hipcc compiler. + + arch : str + The AMD GPU architecture. + + options : str or list of str + The additional options. + + path_target : str, optional + Output file. 
+ + Return + ------ + hsaco : bytearray + The bytearray of the hsaco + """ + if arch is None: + rocm_path = find_rocm_path() + arch = get_rocm_arch(rocm_path) + + temp = utils.tempdir() + if target_format not in ["hsaco"]: + raise ValueError("target_format must be hsaco") + temp_code = temp.relpath("my_kernel.cc") + temp_target = temp.relpath("my_kernel.%s" % target_format) + + with open(temp_code, "w") as out_file: + out_file.write(code) + + file_target = path_target if path_target else temp_target + cmd = ["hipcc"] + cmd += ["-O3", '-c'] + if isinstance(arch, str): + cmd += [f"--offload-arch={arch}"] + if target_format == "hsaco": + cmd += ["--genco"] + if options: + if isinstance(options, str): + cmd += [options] + elif isinstance(options, list): + cmd += options + else: + raise ValueError("options must be str or list of str") + + cmd += ["-o", file_target] + cmd += [temp_code] + + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + (out, _) = proc.communicate() + if verbose: + print(py_str(out)) + + if proc.returncode != 0: + msg = code + msg += "\nCompilation error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + with open(file_target, "rb") as f: + data = bytearray(f.read()) + if not data: + raise RuntimeError("Compilation error: empty result is generated") + return data + + +@tvm._ffi.register_func +def tvm_callback_hip_compile(code, target): + """use hipcc to generate fatbin code for better optimization""" + hsaco = compile_hip(code, target_format="hsaco") + return hsaco \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/miopen.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/miopen.py new file mode 100644 index 0000000000000000000000000000000000000000..0e336c1c82b978ef4d4462a7bd9a6a1842bcd8d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/miopen.py @@ -0,0 +1,190 @@ +# Licensed 
to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to MIOpen library.""" +# pylint: disable-msg=C0103 +import ctypes +import numpy as np +import tvm +import tvm._ffi + +from tvm import te + + +def _get_np_int32_array_handle(arr): + """Return a void_p handle for a numpy array + + Parameters + ---------- + arr: numpy.NDArray + source numpy array + + Returns + ------- + ptr: ctypes.c_void_p + pointer to the data + """ + assert arr.dtype == np.int32 + ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) + return ctypes.cast(ptr, ctypes.c_void_p) + + +def conv2d_forward( + x, + w, + stride_h=1, + stride_w=1, + pad_h=0, + pad_w=0, + dilation_h=1, + dilation_w=1, + conv_mode=0, + data_type=1, + group_count=1, +): + """Create an extern op that compute 2D convolution with MIOpen + + Parameters + ---------- + x: Tensor + input feature map + w: Tensor + convolution weight + stride_h: int + height stride + stride_w: int + width stride + pad_h: int + height pad + pad_w: int + weight pad + dilation_h: int + height dilation + dilation_w: int + width dilation + conv_mode: int + 0: miopenConvolution + 1: miopenTranspose + data_type: int + 0: miopenHalf (fp16) + 1: miopenFloat (fp32) + group_count: int + number of groups + 
Returns + ------- + y: Tensor + The result tensor + """ + assert 0 <= conv_mode <= 2, "0: miopenConvolution / 1: miopenTranspose / 2: miopenGroupConv" + if group_count > 1: + conv_mode = 2 + oshape = np.zeros((len(x.shape)), dtype=np.int32) + xshape = x.shape + wshape = w.shape + setup_func = tvm._ffi.get_global_func("tvm.contrib.miopen.conv2d.setup") + algo = setup_func( + conv_mode, + data_type, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + xshape[0].value, + xshape[1].value, + xshape[2].value, + xshape[3].value, + wshape[0].value, + wshape[1].value, + wshape[2].value, + wshape[3].value, + group_count, + _get_np_int32_array_handle(oshape), + ) + + return te.extern( + list(oshape), + [x, w], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.miopen.conv2d.forward", + conv_mode, + data_type, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + algo, + ins[0], + ins[1], + outs[0], + ), + name="y", + ) + + +def softmax(x, axis=-1): + """Compute softmax with MIOpen + + Parameters + ---------- + x : tvm.te.Tensor + The input tensor + + axis : int + The axis to compute softmax over + + Returns + ------- + ret : tvm.te.Tensor + The result tensor + """ + return te.extern( + x.shape, + [x], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.miopen.softmax.forward", ins[0], outs[0], axis + ), + name="y", + ) + + +def log_softmax(x, axis=-1): + """Compute log softmax with MIOpen + + Parameters + ---------- + x : tvm.te.Tensor + The input tensor + + axis : int + The axis to compute log softmax over + + Returns + ------- + ret : tvm.te.Tensor + The result tensor + """ + return te.extern( + x.shape, + [x], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.miopen.log_softmax.forward", ins[0], outs[0], axis + ), + name="y", + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mkl.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mkl.py new file mode 
100644
index 0000000000000000000000000000000000000000..449d660c9027a1dd550c10532e42f0aabab8e96f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mkl.py
@@ -0,0 +1,126 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""External function interface to BLAS libraries."""
+import tvm
+from tvm import te
+
+
+def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
+    """Create an extern op that computes the matrix product of lhs and rhs
+    with MKL (via the packed function "tvm.contrib.mkl.matmul").
+    This function serves as an example on how to call external libraries.
+
+    Parameters
+    ----------
+    lhs: Tensor
+        The left matrix operand
+    rhs: Tensor
+        The right matrix operand
+    transa: bool
+        Whether transpose lhs
+    transb: bool
+        Whether transpose rhs
+
+    Returns
+    -------
+    C: Tensor
+        The result tensor.
+    """
+    n = lhs.shape[1] if transa else lhs.shape[0]
+    m = rhs.shape[0] if transb else rhs.shape[1]
+    return te.extern(
+        (n, m),
+        [lhs, rhs],
+        lambda ins, outs: tvm.tir.call_packed(
+            "tvm.contrib.mkl.matmul", ins[0], ins[1], outs[0], transa, transb
+        ),
+        name="C",
+        **kwargs,
+    )
+
+
+def matmul_u8s8s32(lhs, rhs, transa=False, transb=False, **kwargs):
+    """Create an extern op that computes the matrix product of lhs and rhs
+    with MKL (via the packed function "tvm.contrib.mkl.matmul_u8s8s32").
+    This function serves as an example on how to call external libraries.
+
+    Parameters
+    ----------
+    lhs: Tensor
+        The left matrix operand
+    rhs: Tensor
+        The right matrix operand
+    transa: bool
+        Whether transpose lhs
+    transb: bool
+        Whether transpose rhs
+
+    Returns
+    -------
+    C: Tensor
+        The result tensor.
+    """
+    n = lhs.shape[1] if transa else lhs.shape[0]
+    m = rhs.shape[0] if transb else rhs.shape[1]
+    return te.extern(
+        (n, m),
+        [lhs, rhs],
+        lambda ins, outs: tvm.tir.call_packed(
+            "tvm.contrib.mkl.matmul_u8s8s32", ins[0], ins[1], outs[0], transa, transb
+        ),
+        name="C",
+        **kwargs,
+    )
+
+
+def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs):
+    """Create an extern op that computes the batched matrix product of lhs and
+    rhs with MKL (via the packed function "tvm.contrib.mkl.batch_matmul").
+    This function serves as an example on how to call external libraries.
+
+    Parameters
+    ----------
+    lhs: Tensor
+        The left matrix operand
+    rhs: Tensor
+        The right matrix operand
+    transa: bool
+        Whether transpose lhs
+    transb: bool
+        Whether transpose rhs
+    iterative: bool
+        When True, dispatch to "tvm.contrib.mkl.batch_matmul_iterative"
+        instead of "tvm.contrib.mkl.batch_matmul"
+
+    Returns
+    -------
+    C: Tensor
+        The result tensor.
+ """ + b = te.max(lhs.shape[0], rhs.shape[0]) + n = lhs.shape[2] if transa else lhs.shape[1] + m = rhs.shape[1] if transb else rhs.shape[2] + return te.extern( + (b, n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.mkl.batch_matmul" + if not iterative + else "tvm.contrib.mkl.batch_matmul_iterative", + ins[0], + ins[1], + outs[0], + transa, + transb, + ), + name="C", + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mps.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mps.py new file mode 100644 index 0000000000000000000000000000000000000000..eb8ad777964b61525fbdc7988284910570fbf5d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mps.py @@ -0,0 +1,95 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to MPS libraries.""" +import tvm +from tvm import te + + +# pylint: disable=C0103,W0612 + + +def matmul(lhs, rhs, transa=False, transb=False): + """Create an extern op that compute matrix mult of A and rhs with CrhsLAS + + This function serves as an example on how to calle external libraries. 
+ + Parameters + ---------- + lhs : Tensor + The left matrix operand + rhs : Tensor + The right matrix operand + transa : bool + Whether transpose lhs + transb : bool + Whether transpose rhs + + Returns + ------- + C : Tensor + The result tensor. + """ + m = lhs.shape[0] if transa is False else lhs.shape[1] + n = rhs.shape[1] if transb is False else rhs.shape[0] + if transa: + m = b + if transb: + n = c + return te.extern( + (m, n), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.mps.matmul", ins[0], ins[1], outs[0], transa, transb + ), + name="C", + ) + + +def conv2d(data, weight, pad="SAME", stride=1): + """ + Create an extern op that compute data * weight and return result in output + + Parameters: + ---------- + data: Tensor + The input data, format NHWC + weight: Tensor + The conv weight, format output_feature * kH * kW * input_feature + pad: str + Padding method, 'SAME' or 'VALID' + stride: int + convolution stride + + Returns + ------- + output: Tensor + The result tensor + """ + n, hi, wi, ci = data.shape + co, kh, kw, ciw = weight.shape + padding = 0 if pad == "SAME" else 1 + ho = hi // stride + wo = wi // stride + + return te.extern( + (n, ho, wo, co), + [data, weight], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.mps.conv2d", ins[0], ins[1], outs[0], padding, stride + ), + name="C", + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mrvl.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mrvl.py new file mode 100644 index 0000000000000000000000000000000000000000..7004bb5b9db6e80063eeb69dc0efcd05cfc40bd9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mrvl.py @@ -0,0 +1,457 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name, unused-argument, broad-except +"""Utility to compile Marvell models""" + +import os +import json +import shutil +import tempfile +import base64 +import numpy as np +import tvm +import tvm._ffi + + +@tvm._ffi.register_func("tvm.mrvl.find_value_in_KV_pair") +def find_value_in_KV_pair(json_input: str, key_to_find: str) -> str: + """This function takes the graph_json string and key to be searched in + the json string, using json parser routine it loads the json string + and access the value using the given key. It raises exception if the + key is not found in the input json string. + + Parameters + ---------- + graph_json: String + This is the graph_json string + + Returns + ------- + value_string: string + This returns the value string for the given key string + """ + value = "" + try: + json_dict = json.loads(json_input) + value = json_dict[key_to_find] + except KeyError: + assert False, "Marvell-Compiler-ERROR-Internal:: Could not find matching key in json" + + return value + + +@tvm._ffi.register_func("tvm.mrvl.GetNodesJSONString") +def get_nodes_json_string(graph_json): + """This takes the graph_json string from MrvlJSONSerializer and adds / modifies + the json string to a form suitable for the Marvell Backend. 
+ + Parameters + ---------- + graph_json: String + This is the graph_json string from the MrvlJSONSerializer + + Returns + ------- + nodes_json_string: string + This returns the nodes_json string which can be accepted by the Marvell backend. + """ + + dictionary = json.loads(graph_json) + # Add Marvell Index and rename "op" and "name" fields + mrvl_idx = 1 + num_in = 0 + for iterator in dictionary["nodes"]: + if iterator["op"] == "kernel": + iterator["op"] = "tvm_op" + iterator["attrs"]["mrvl_nodes_idx"] = [mrvl_idx] + iterator["attrs"]["kernel_const"] = {} + iterator["attrs"]["bias_const"] = {} + iterator["attrs"]["beta_const"] = {} + iterator["attrs"]["gamma_const"] = {} + iterator["attrs"]["var_const"] = {} + iterator["attrs"]["mean_const"] = {} + iterator["name"] = "tvmgen_mrvl_main" + "_" + str(mrvl_idx - 1) + mrvl_idx = mrvl_idx + 1 + if iterator["op"] == "input": + iterator["attrs"]["layer_name"] = ["input"] + iterator["inputs"] = [] + in_id = iterator["name"].split("_i")[-1] + iterator["input_id"] = [in_id] + iterator["attrs"]["dtype"] = iterator["attrs"]["dtype"][0] + iterator["attrs"]["shape"] = iterator["attrs"]["shape"][0] + if len(iterator["attrs"]["shape"][0]) == 2: + iterator["attrs"]["data_layout"] = ["NC"] + else: + iterator["attrs"]["data_layout"] = ["NCHW"] + # Infer Batch Size from the input shape + batch_size = iterator["attrs"]["shape"][0][0] + dictionary["batch_size"] = f"{batch_size}" + num_in = num_in + 1 + + # Create a new inputs to store only the previous node input and not the const inputs + for iterator in dictionary["nodes"]: + if iterator["op"] == "tvm_op": + list_prev = [] + for prev in iterator["inputs"]: + if dictionary["nodes"][prev[0]]["op"] == "tvm_op": + mrvl_idx_prev = dictionary["nodes"][prev[0]]["attrs"]["mrvl_nodes_idx"][0] + list_prev.append([mrvl_idx_prev + num_in - 1, 0, 0]) + if dictionary["nodes"][prev[0]]["op"] == "input": + idx_in = int(dictionary["nodes"][prev[0]]["input_id"][0]) + list_prev.append([idx_in, 0, 0]) + 
iterator["node_prev"] = list_prev + + for iterator in dictionary["nodes"]: + if iterator["op"] == "tvm_op": + del iterator["inputs"] + + for iterator in dictionary["nodes"]: + if iterator["op"] == "tvm_op": + iterator["inputs"] = iterator["node_prev"] + + for iterator in dictionary["nodes"]: + if iterator["op"] == "tvm_op": + del iterator["node_prev"] + + # Remove unneeded fields + del dictionary["node_row_ptr"] + + # Patch up arg_nodes and heads to remove references to constant inputs + list_nodes = dictionary["arg_nodes"] + list_nodes_updated = [] + + for iterator in list_nodes: + if dictionary["nodes"][iterator]["op"] != "const": + if dictionary["nodes"][iterator]["op"] == "input": + input_name = dictionary["nodes"][iterator]["name"] + input_num_str = input_name.split("_i", 1)[1] + input_num = int(input_num_str) + list_nodes_updated.append(input_num) + else: + list_nodes_updated.append( + dictionary["nodes"][iterator]["attrs"]["mrvl_nodes_idx"][0] + ) + dictionary["arg_nodes"] = list_nodes_updated + + # Add additional data required by the runtime such as number of inputs + # and number of outputs to the subgraph + num_subgraph_inputs = str(len(list_nodes_updated)) + dictionary["num_subgraph_inputs"] = f"{num_subgraph_inputs}" + list_heads = dictionary["heads"] + list_heads_updated = [] + for iterator in list_heads: + if dictionary["nodes"][iterator[0]]["op"] != "const": + if iterator[0] != 0: + get_index = dictionary["nodes"][iterator[0]]["attrs"]["mrvl_nodes_idx"][0] + new_index = get_index + num_in - 1 + list_heads_updated.append([new_index, 0, 0]) + dictionary["heads"] = list_heads_updated + + num_subgraph_outputs = str(len(list_heads_updated)) + dictionary["num_subgraph_outputs"] = f"{num_subgraph_outputs}" + + # Delete the constant nodes, these are not required for the constants file + dictionary["nodes"] = [ + feature for feature in dictionary["nodes"] if "const" not in feature["op"] + ] + + # Remove un-needed array nesting + for iterator in 
dictionary["nodes"]: + if iterator["op"] not in "input": + for it2 in iterator["attrs"]: + if it2 not in [ + "num_inputs", + "num_outputs", + "mrvl_nodes_idx", + "mean_const", + "var_const", + "beta_const", + "kernel_const", + "bias_const", + "gamma_const", + "input_const", + ]: + iterator["attrs"][it2] = iterator["attrs"][it2][0] + + # Now create the dltype and dlshape attributes + dltype = ["list_str"] + shape = ["list_shape"] + list_types = [] + list_shapes = [] + for iterator in dictionary["nodes"]: + list_types.append(iterator["attrs"]["dtype"][0]) + list_shapes.append(iterator["attrs"]["shape"][0]) + dltype.append(list_types) + shape.append(list_shapes) + dict_shape_type = {} + dict_shape_type["shape"] = shape + dict_shape_type["dltype"] = dltype + dictionary["attrs"] = dict_shape_type + + nodes_json_string = json.dumps(dictionary) + return nodes_json_string + + +@tvm._ffi.register_func("tvm.mrvl.ModifyConstNames") +def modify_const_names(nodes_json_str, consts_json_str): + """This takes the graph module returned by relay.build an generates nodes and constant + meta data suitable for compilation by the back end. + + Parameters + ---------- + nodes_json_str: string + The nodes json string suitable for the Marvell backend. + + consts_json_str: string + The consts_json_string generated by the backend compiler. + + Returns + ------- + modified_nodes_consts: string + This returns a concatenated string of the nodes_json and modified + consts json file, seperated by a delimiter |. The modification to the + consts file is necessary since we have added the Merge Compiler Pass + which names the constants in a form unsuitable for the backend. 
+ """ + + nodes = json.loads(nodes_json_str) + const = json.loads(consts_json_str) + for iterator in nodes["nodes"]: + hasBias = False + for attrs in iterator["attrs"]: + if attrs == "bias_const_name": + hasBias = True + for attrs in iterator["attrs"]: + if attrs == "kernel_const_name": + new_name = iterator["name"] + "_const_0" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + iterator["attrs"][attrs][0] = new_name + map_kernel = {} + map_kernel["shape"] = const[new_name]["shape"] + map_kernel["dtype"] = const[new_name]["dtype"] + map_kernel["min"] = const[new_name]["min"] + map_kernel["max"] = const[new_name]["max"] + map_kernel["name"] = new_name + iterator["attrs"]["kernel_const"] = map_kernel + if attrs == "bias_const_name": + new_name = iterator["name"] + "_const_1" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + iterator["attrs"][attrs][0] = new_name + bias_map = {} + bias_map["shape"] = const[new_name]["shape"] + bias_map["dtype"] = const[new_name]["dtype"] + bias_map["min"] = const[new_name]["min"] + bias_map["max"] = const[new_name]["max"] + bias_map["name"] = new_name + iterator["attrs"]["bias_const"] = bias_map + if attrs == "gamma_const_name": + if hasBias: + new_name = iterator["name"] + "_const_2" + else: + new_name = iterator["name"] + "_const_1" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + iterator["attrs"][attrs][0] = new_name + gamma_map = {} + gamma_map["shape"] = const[new_name]["shape"] + gamma_map["dtype"] = const[new_name]["dtype"] + gamma_map["name"] = new_name + iterator["attrs"]["gamma_const"] = gamma_map + if attrs == "beta_const_name": + if hasBias: + new_name = iterator["name"] + "_const_3" + else: + new_name = iterator["name"] + "_const_2" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + iterator["attrs"][attrs][0] = new_name + beta_map = {} + beta_map["shape"] = const[new_name]["shape"] + beta_map["dtype"] = const[new_name]["dtype"] + beta_map["name"] = new_name + 
iterator["attrs"]["beta_const"] = beta_map + if attrs == "mean_const_name": + if hasBias: + new_name = iterator["name"] + "_const_4" + else: + new_name = iterator["name"] + "_const_3" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + iterator["attrs"][attrs][0] = new_name + mean_map = {} + mean_map["shape"] = const[new_name]["shape"] + mean_map["dtype"] = const[new_name]["dtype"] + mean_map["name"] = new_name + iterator["attrs"]["mean_const"] = mean_map + if attrs == "var_const_name": + if hasBias: + new_name = iterator["name"] + "_const_5" + else: + new_name = iterator["name"] + "_const_4" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + iterator["attrs"][attrs][0] = new_name + var_map = {} + var_map["shape"] = const[new_name]["shape"] + var_map["dtype"] = const[new_name]["dtype"] + var_map["name"] = new_name + iterator["attrs"]["var_const"] = var_map + if attrs == "input_const_name": + new_name = iterator["name"] + "_const_0" + const[new_name] = const.pop(iterator["attrs"][attrs][0]) + const[new_name]["shape"] = list(map(int, iterator["attrs"]["input_const_shape"])) + iterator["attrs"][attrs][0] = new_name + map_const = {} + map_const["shape"] = const[new_name]["shape"] + map_const["dtype"] = const[new_name]["dtype"] + map_const["min"] = const[new_name]["min"] + map_const["max"] = const[new_name]["max"] + map_const["name"] = new_name + iterator["attrs"]["input_const"] = map_const + + nodes_mod_str = json.dumps(nodes, indent=2) + const_mod_str = json.dumps(const, indent=2) + return nodes_mod_str + "|" + const_mod_str + + +def get_working_dir(): + """Obtain the current working directory from where tvm is invoked""" + return os.getcwd() + + +@tvm._ffi.register_func("tvm.mrvl.WriteJsonFile") +def write_json_file(json_string, json_filename): + """Generate json file under working directory""" + working_dir = get_working_dir() + json_file = os.path.join(working_dir, json_filename) + with open(json_file, "w") as out_file: + 
out_file.write(json_string) + return json_file + + +def delete_temp_files(symbol_name): + """Delete temporary files generated by the Marvell compiler""" + working_dir = get_working_dir() + nodes_json_file = os.path.join(working_dir, f"{symbol_name}-nodes.json") + consts_json_file = os.path.join(working_dir, f"{symbol_name}-consts.json") + os.remove(nodes_json_file) + os.remove(consts_json_file) + bin_folder = os.path.join(working_dir, "bin_" + symbol_name) + if "MRVL_SAVE_MODEL_BIN" not in os.environ: + shutil.rmtree(bin_folder) + + +@tvm._ffi.register_func("tvm.mrvl.CompileModel") +def compile_model( + symbol_name, + nodes_json_string, + consts_json_string, + compiler_opts, +): + """Compile the model using Marvell Backend compiler and return the generated binary""" + # generate pair of json files + nodes_json_file = write_json_file(nodes_json_string, f"{symbol_name}-nodes.json") + consts_json_file = write_json_file(consts_json_string, f"{symbol_name}-consts.json") + mrvl_exec = "mrvl-tmlc" + exec_on_path = shutil.which(mrvl_exec) + if exec_on_path is None: + error_msg = ( + "Marvell Compiler not found! Please specify the path to Marvell tools " + "by adding it to $PATH." 
+ ) + raise RuntimeError(error_msg) + + # Parse the nodes_json string for the batch size + dictionary = json.loads(nodes_json_string) + batch_size = dictionary["batch_size"] + + # Check for supported batch size + if int(batch_size) > 8: + error_msg = "Compilation ERROR: mrvl-tmlc supports batch_size <= 8" + raise RuntimeError(error_msg) + + # Invoke Marvell Backend with appropriate options + compile_cmd = ( + mrvl_exec + + " -mn " + + symbol_name + + " -f1 " + + nodes_json_file + + " -f2 " + + consts_json_file + + " " + + compiler_opts + + " -b " + + batch_size + ) + + ret_val = os.system(compile_cmd) + if ret_val == 0: + # Read generated binary and encode in base64 format + working_dir = get_working_dir() + bin_file = os.path.join(working_dir, "bin_" + symbol_name, symbol_name + ".bin") + + with open(bin_file, "rb") as f: + data = bytearray(f.read()) + base64_bytes = base64.b64encode(data) + if not data: + raise RuntimeError("Compilation ERROR: Marvell binary could not be generated") + # Cleanup Temporary Files + delete_temp_files(symbol_name) + return base64_bytes + else: + error_msg = "Compilation ERROR: Error compiling Marvell region!" 
+ raise RuntimeError(error_msg) + + +@tvm._ffi.register_func("tvm.mrvl.CleanUpSim") +def clean_up_sim(bin_file, input_json, input_bin, out_bin_prefix, num_outputs): + os.remove(bin_file) + os.remove(input_json) + os.remove(input_bin) + for i in range(num_outputs): + out_bin = out_bin_prefix + "-" + str(i) + ".bin" + os.remove(out_bin) + + +@tvm._ffi.register_func("tvm.mrvl.SearchPath") +def search_path(file_name): + path = shutil.which(file_name) + if path is None: + return "" + return os.path.dirname(path) + + +@tvm._ffi.register_func("tvm.mrvl.JsonToBin") +def convert_json_to_bin(json_file, input_bin_file): + with open(json_file) as input_json: + data = json.load(input_json) + data_float = np.array(data["inputs"], dtype=np.float32) + data_b = data_float.tobytes() + with open(input_bin_file, "wb") as f: + f.write(data_b) + + +@tvm._ffi.register_func("tvm.mrvl.RunSim") +def run_simulation(run_command, sim_directory): + cwd_path = get_working_dir() + os.mkdir(sim_directory) + os.chdir(sim_directory) + os.system(run_command) + os.chdir(cwd_path) + shutil.rmtree(sim_directory) + + +@tvm._ffi.register_func("tvm.mrvl.TempDir") +def get_temp_dir(): + return tempfile.gettempdir() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mxnet.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mxnet.py new file mode 100644 index 0000000000000000000000000000000000000000..6e551dfe46e3f9daf9acad78da586d62882d3c79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/mxnet.py @@ -0,0 +1,78 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""MXNet bridge wrap Function MXNet's async function.""" +from __future__ import absolute_import as _abs + +import tvm._ffi.registry +import tvm.runtime._ffi_api +from tvm.runtime import Module + +# pylint: disable=invalid-name +_wrap_async = None + + +def to_mxnet_func(func, const_loc=None): + """Wrap a TVM function as MXNet function + + MXNet function runs asynchrously via its engine. + + Parameters + ---------- + func : Function + A TVM function that can take positional arguments + + const_loc : list of int + List of integers indicating the argument position + of read only NDArray argument. + The NDArray argument location that are not annotated + will be viewed as mutable arrays in MXNet's engine. + + Returns + ------- + async_func : Function + A function that can take MXNet NDArray as argument + in places that used to expect TVM NDArray. + Run asynchrously in MXNet's async engine. + """ + # only import mxnet when wrap get called. 
+ # pylint: disable=import-self, import-outside-toplevel + import mxnet + + if isinstance(func, Module): + func = func.entry_func + + def _get_bridge_func(): + """Get MXNet bridge function""" + if not mxnet.base._LIB.MXTVMBridge: + raise RuntimeError( + "MXTVMBridge not exist in mxnet package," " please update to latest version" + ) + + fdict = tvm._ffi.registry.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge) + ret = fdict["WrapAsyncCall"] + ret.is_global = True + return ret + + global _wrap_async + + if _wrap_async is None: + # Register extension type in first time + _wrap_async = _get_bridge_func() + tvm._ffi.registry.register_extension(mxnet.nd.NDArray) + + const_loc = const_loc if const_loc else [] + return _wrap_async(func, tvm.runtime._ffi_api.TVMSetStream, len(const_loc), *const_loc) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ndk.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ndk.py new file mode 100644 index 0000000000000000000000000000000000000000..14820c0ca8ab72efe4bdbb378a45a9efb2c852ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/ndk.py @@ -0,0 +1,166 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +"""Util to invoke NDK compiler toolchain.""" +# pylint: disable=invalid-name +from __future__ import absolute_import as _abs + +import subprocess +import os +import shutil +from typing import Dict +import tempfile +from pathlib import Path + +from .._ffi import register_func +from .._ffi.base import py_str +from . import utils as _utils, tar as _tar, cc as _cc +from .cc import get_target_by_dump_machine + + +def create_shared(output, objects, options=None): + """Create shared library. + + Parameters + ---------- + output : str + The target shared library. + + objects : list + List of object files. + + options : list of str, optional + The additional options. + """ + if "TVM_NDK_CC" not in os.environ: + raise RuntimeError( + "Require environment variable TVM_NDK_CC" " to be the NDK standalone compiler" + ) + compiler = os.environ["TVM_NDK_CC"] + cmd = [compiler] + cmd += ["-o", output] + + if isinstance(objects, str): + cmd += [objects] + else: + cmd += objects + + options = options if options else ["-shared", "-fPIC", "-lm"] + cmd += options + + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Compilation error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + +# assign output format +create_shared.output_format = "so" +create_shared.get_target_triple = ( + get_target_by_dump_machine(os.environ["TVM_NDK_CC"]) if "TVM_NDK_CC" in os.environ else None +) + + +def create_staticlib(output, inputs): + """Create static library: + + Parameters + ---------- + output : str + The target static library. 
+ + inputs : list + List of object files or tar files + """ + if "TVM_NDK_CC" not in os.environ: + raise RuntimeError( + "Require environment variable TVM_NDK_CC" " to be the NDK standalone compiler" + ) + output_name = os.path.basename(output) + + temp = _utils.tempdir() + tmp_output = temp.relpath("lib" + output_name) + objects = _tar.normalize_file_list_by_unpacking_tars(temp, inputs) + + compiler = os.environ["TVM_NDK_CC"] + base_path = os.path.dirname(compiler) + ar_path = os.path.join(base_path, "llvm-ar") + cmd = [ar_path] + cmd += ["qcs", tmp_output] + cmd += objects + + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "AR error:\n" + msg += py_str(out) + msg += "\nCommand line: " + " ".join(cmd) + raise RuntimeError(msg) + + ranlib_path = os.path.join(base_path, "llvm-ranlib") + cmd = [ranlib_path] + cmd += [tmp_output] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "Ranlib error:\n" + msg += py_str(out) + msg += "\nCommand line: " + " ".join(cmd) + raise RuntimeError(msg) + + shutil.move(tmp_output, output) + + +create_staticlib.output_format = "a" + + +def get_global_symbol_section_map(path, *, nm=None) -> Dict[str, str]: + """Get global symbols from a library via nm -gU in NDK + + Parameters + ---------- + path : str + The library path + + nm: str + The path to nm command + + Returns + ------- + symbol_section_map: Dict[str, str] + A map from defined global symbol to their sections + """ + if "TVM_NDK_CC" not in os.environ: + raise RuntimeError( + "Require environment variable TVM_NDK_CC" " to be the NDK standalone compiler" + ) + if nm is None: + compiler = os.environ["TVM_NDK_CC"] + base_path = os.path.dirname(compiler) + nm = os.path.join(base_path, "llvm-nm") + return _cc.get_global_symbol_section_map(path, nm=nm) + + 
+@register_func("meta_schedule.builder.export_ndk") +def _ndk_export(mod): + tmp_dir = tempfile.mkdtemp() + binary_name = "tmp_binary.so" + binary_path = Path(tmp_dir) / binary_name + mod.export_library(binary_path, fcompile=create_shared) + return str(binary_path) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/nnpack.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/nnpack.py new file mode 100644 index 0000000000000000000000000000000000000000..010bef533c002eb4aa591e62d5b0b583604d1539 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/nnpack.py @@ -0,0 +1,235 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""External function interface to NNPACK libraries.""" +import tvm +from tvm import te +import tvm._ffi + + +def is_available(): + """Check whether NNPACK is available, that is, `nnp_initialize()` + returns `nnp_status_success`. + """ + return _initialize() == 0 + + +def fully_connected_inference(lhs, rhs, nthreads=1): + """Create an extern op that compute fully connected of 1D tensor lhs and + 2D tensor rhs with nnpack. 
+ + Parameters + ---------- + lhs : Tensor + lhs 1D array input[input_channels] of FP32 elements + rhs : Tensor + lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements + + Returns + ------- + C : Tensor + lhs 1D array out[output_channels] of FP32 elements. + """ + m = rhs.shape[0] + return te.extern( + (m,), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.nnpack.fully_connected_inference", ins[0], ins[1], outs[0], nthreads + ), + name="C", + ) + + +class ConvolutionAlgorithm: + AUTO = 0 + FFT_8x8 = 1 + FFT_16x16 = 2 + WT_8x8 = 3 + IMPLICIT_GEMM = 4 + DIRECT = 5 + WT_8x8_FP16 = 6 + + +class ConvolutionTransformStrategy: + COMPUTE = 1 + PRECOMPUTE = 2 + + +def convolution_inference( + data, kernel, bias, padding, stride, nthreads=1, algorithm=ConvolutionAlgorithm.AUTO +): + """Create an extern op to do inference convolution of 4D tensor data and + 4D tensor kernel and 1D tensor bias with nnpack. + + Parameters + ---------- + data : Tensor + data 4D tensor input[batch][input_channels][input_height][input_width] of + FP32 elements. + kernel : Tensor + kernel 4D tensor kernel[output_channels][input_channels][kernel_height] + [kernel_width] of FP32 elements. + bias : Tensor + bias 1D array bias[output_channels][input_channels][kernel_height] + [kernel_width] of FP32 elements. + padding : list + padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right], + which indicates the padding around the feature map. + stride : list + stride A 2-dim list of [stride_height, stride_width], which indicates + the stride. + + Returns + ------- + output : Tensor + output 4D tensor output[batch][output_channels][output_height][output_width] + of FP32 elements. 
+ """ + + assert isinstance(padding, list) and len(padding) == 4 + assert isinstance(stride, list) and len(stride) == 2 + batch, _, input_height, input_width = data.shape + output_channels, _, kernel_height, kernel_width = kernel.shape + idxdiv = te.indexdiv + output_height = idxdiv(input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1 + output_width = idxdiv(input_width + padding[0] + padding[1] - kernel_width, stride[1]) + 1 + + return te.extern( + (batch, output_channels, output_height, output_width), + [data, kernel, bias] if bias is not None else [data, kernel], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.nnpack.convolution_inference", + ins[0], + ins[1], + ins[2] if bias is not None else 0, + outs[0], + padding[0], + padding[1], + padding[2], + padding[3], + stride[0], + stride[1], + nthreads, + algorithm, + ), + name="C", + ) + + +def convolution_inference_without_weight_transform( + data, transformed_kernel, bias, padding, stride, nthreads=1, algorithm=ConvolutionAlgorithm.AUTO +): + """Create an extern op to do inference convolution of 4D tensor data and + 4D pre-transformed tensor kernel and 1D tensor bias with nnpack. + + Parameters + ---------- + data : Tensor + data 4D tensor input[batch][input_channels][input_height][input_width] of + FP32 elements. + transformed_kernel : Tensor + transformed_kernel 4D tensor kernel[output_channels][input_channels][tile] + [tile] of FP32 elements. + bias : Tensor + bias 1D array bias[output_channels][input_channels][kernel_height] + [kernel_width] of FP32 elements. + padding : list + padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right], + which indicates the padding around the feature map. + stride : list + stride A 2-dim list of [stride_height, stride_width], which indicates + the stride. + + Returns + ------- + output : Tensor + output 4D tensor output[batch][output_channels][output_height][output_width] + of FP32 elements. 
+ """ + + assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16) + assert isinstance(padding, list) and len(padding) == 4 + assert isinstance(stride, list) and len(stride) == 2 + batch, _, input_height, input_width = data.shape + output_channels, _, _, _ = transformed_kernel.shape + kernel_height, kernel_width = (3, 3) + idxdiv = te.indexdiv + output_height = idxdiv(input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1 + output_width = idxdiv(input_width + padding[0] + padding[1] - kernel_width, stride[1]) + 1 + + return te.extern( + (batch, output_channels, output_height, output_width), + [data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.nnpack.convolution_inference_without_weight_transform", + ins[0], + ins[1], + ins[2] if bias is not None else 0, + outs[0], + padding[0], + padding[1], + padding[2], + padding[3], + stride[0], + stride[1], + nthreads, + algorithm, + ), + name="C", + dtype="float32", + ) + + +def convolution_inference_weight_transform( + kernel, nthreads=1, algorithm=ConvolutionAlgorithm.AUTO, dtype="float32" +): + """Create an extern op to do inference convolution of 3D tensor data and + 4D tensor kernel and 1D tensor bias with nnpack. + + Parameters + ---------- + kernel : Tensor + kernel 4D tensor kernel[output_channels][input_channels][kernel_height] + [kernel_width] of FP32 elements. + + Returns + ------- + output : Tensor + output 4D tensor output[output_channels][input_channels][tile][tile] + of FP32 elements. 
+ """ + assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16) + output_channels, input_channels, _, _ = kernel.shape + transform_tile_size = 8 + if not isinstance(dtype, str): + dtype = dtype.dtype + return te.extern( + (output_channels, input_channels, transform_tile_size, transform_tile_size), + [kernel], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.nnpack.convolution_inference_weight_transform", + ins[0], + outs[0], + nthreads, + algorithm, + ), + name="transform_kernel", + dtype=dtype, + ) + + +tvm._ffi._init_api("tvm.contrib.nnpack") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/nvcc.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/nvcc.py new file mode 100644 index 0000000000000000000000000000000000000000..2d83cde791631265722cecac1da0bc876a592cb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/nvcc.py @@ -0,0 +1,450 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name +"""Utility to invoke nvcc compiler in the system""" +from __future__ import absolute_import as _abs + +import os +import subprocess +import warnings + +import tvm._ffi +from tvm.target import Target + +from .._ffi.base import py_str +from . import utils + + +def compile_cuda(code, target_format="ptx", arch=None, options=None, path_target=None, verbose=False): + """Compile cuda code with NVCC from env. + + Parameters + ---------- + code : str + The cuda code. + + target_format : str + The target format of nvcc compiler. + + arch : str + The cuda architecture. + + options : str or list of str + The additional options. + + path_target : str, optional + Output file. + + Return + ------ + cubin : bytearray + The bytearray of the cubin + """ + if arch is None: + # If None, then it will use `tvm.target.Target.current().arch`. + # Target arch could be a str like "sm_xx", or a list, such as + # [ + # "-gencode", "arch=compute_52,code=sm_52", + # "-gencode", "arch=compute_70,code=sm_70" + # ] + compute_version = "".join( + get_target_compute_version(Target.current(allow_none=True)).split(".") + ) + arch = ["-gencode", f"arch=compute_{compute_version},code=sm_{compute_version}"] + + temp = utils.tempdir() + file_name = "tvm_kernels" + if target_format not in ["cubin", "ptx", "fatbin"]: + raise ValueError("target_format must be in cubin, ptx, fatbin") + temp_code = temp.relpath(f"{file_name}.cu") + temp_target = temp.relpath(f"{file_name}.{target_format}") + + pass_context = tvm.get_global_func("transform.GetCurrentPassContext")() + kernels_output_dir = ( + pass_context.config["cuda.kernels_output_dir"] + if "cuda.kernels_output_dir" in pass_context.config + else None + ) + if kernels_output_dir is not None: + if not os.path.isdir(kernels_output_dir): + os.makedirs(kernels_output_dir) + temp_code = os.path.join(kernels_output_dir, f"{file_name}.cu") + temp_target = os.path.join(kernels_output_dir, f"{file_name}.{target_format}") + + with 
open(temp_code, "w") as out_file: + out_file.write(code) + + file_target = path_target if path_target else temp_target + cmd = ["nvcc"] + cmd += [f"--{target_format}", "-O3"] + if kernels_output_dir is not None: + cmd += ["-lineinfo"] + if isinstance(arch, list): + cmd += arch + elif isinstance(arch, str): + cmd += ["-arch", arch] + + if options: + if isinstance(options, str): + cmd += [options] + elif isinstance(options, list): + cmd += options + else: + raise ValueError("options must be str or list of str") + + cmd += ["-o", file_target] + cmd += [temp_code] + + # NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler + # just in case it is not in the path. On Windows it is not in the path by default. + # However, we cannot use TVM_CXX_COMPILER_PATH because the runtime env. + # Because it is hard to do runtime compiler detection, we require nvcc is configured + # correctly by default. + # if cxx_compiler_path != "": + # cmd += ["-ccbin", cxx_compiler_path] + + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + (out, _) = proc.communicate() + + if verbose: + print(py_str(out)) + + if proc.returncode != 0: + msg = code + msg += "\nCompilation error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + with open(file_target, "rb") as f: + data = bytearray(f.read()) + if not data: + raise RuntimeError("Compilation error: empty result is generated") + return data + + +def find_cuda_path(): + """Utility function to find cuda path + + Returns + ------- + path : str + Path to cuda root. 
+ """ + if "CUDA_PATH" in os.environ: + return os.environ["CUDA_PATH"] + cmd = ["which", "nvcc"] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + out = py_str(out) + if proc.returncode == 0: + return os.path.realpath(os.path.join(str(out).strip(), "../..")) + cuda_path = "/usr/local/cuda" + if os.path.exists(os.path.join(cuda_path, "bin/nvcc")): + return cuda_path + raise RuntimeError("Cannot find cuda path") + + +def get_cuda_version(cuda_path=None): + """Utility function to get cuda version + + Parameters + ---------- + cuda_path : Optional[str] + + Path to cuda root. If None is passed, will use + `find_cuda_path()` as default. + + Returns + ------- + version : float + The cuda version + + """ + if cuda_path is None: + cuda_path = find_cuda_path() + + version_file_path = os.path.join(cuda_path, "version.txt") + if not os.path.exists(version_file_path): + # Debian/Ubuntu repackaged CUDA path + version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt") + try: + with open(version_file_path) as f: + version_str = f.read().strip().split()[-1] + return tuple(int(field) for field in version_str.split(".")) + except FileNotFoundError: + pass + + cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + out = py_str(out) + if proc.returncode == 0: + release_line = [l for l in out.split("\n") if "release" in l][0] + release_fields = [s.strip() for s in release_line.split(",")] + version_str = [f[1:] for f in release_fields if f.startswith("V")][0] + return tuple(int(field) for field in version_str.split(".")) + raise RuntimeError("Cannot read cuda version file") + + +@tvm._ffi.register_func +def tvm_callback_cuda_compile(code, target): # pylint: disable=unused-argument + """use nvcc to generate fatbin code for better optimization""" + ptx = compile_cuda(code, 
target_format="fatbin") + return ptx + + +@tvm._ffi.register_func("tvm_callback_libdevice_path") +def find_libdevice_path(arch): + """Utility function to find libdevice + + Parameters + ---------- + arch : int + The compute architecture in int + + Returns + ------- + path : str + Path to libdevice. + """ + cuda_path = find_cuda_path() + lib_path = os.path.join(cuda_path, "nvvm/libdevice") + if not os.path.exists(lib_path): + # Debian/Ubuntu repackaged CUDA path + lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice") + selected_ver = 0 + selected_path = None + cuda_ver = get_cuda_version(cuda_path) + major_minor = (cuda_ver[0], cuda_ver[1]) + if major_minor in ( + (9, 0), + (9, 1), + (10, 0), + (10, 1), + (10, 2), + (11, 0), + (11, 1), + (11, 2), + (11, 3), + ): + path = os.path.join(lib_path, "libdevice.10.bc") + else: + for fn in os.listdir(lib_path): + if not fn.startswith("libdevice"): + continue + + try: + # expected pattern: libdevice.${ARCH}.10.bc + # e.g., libdevice.compute_20.10.bc + ver = int(fn.split(".")[-3].split("_")[-1]) + if selected_ver < ver <= arch: + selected_ver = ver + selected_path = fn + except ValueError: + # it can just be `libdevice.10.bc` in CUDA 10 + selected_path = fn + + if selected_path is None: + raise RuntimeError(f"Cannot find libdevice for arch {arch}") + path = os.path.join(lib_path, selected_path) + return path + + +def callback_libdevice_path(arch): + try: + return find_libdevice_path(arch) + except RuntimeError: + warnings.warn("Cannot find libdevice path") + return "" + + +@tvm._ffi.register_func("tvm.contrib.nvcc.get_compute_version") +def get_target_compute_version(target=None): + """Utility function to get compute capability of compilation target. + + Looks for the target arch in three different places, first in the target input, then the + Target.current() scope, and finally the GPU device (if it exists). 
+ + Parameters + ---------- + target : tvm.target.Target, optional + The compilation target + + Returns + ------- + compute_version : str + compute capability of a GPU (e.g. "8.6") + """ + # 1. input target object + # 2. Target.current() + target = target or Target.current() + if target and target.arch: + arch = target.arch.split("_")[1] + if len(arch) == 2: + major, minor = arch + return major + "." + minor + elif len(arch) == 3: + # This is for arch like "sm_90a" + major, minor, suffix = arch + return major + "." + minor + "." + suffix + + # 3. GPU compute version + if tvm.cuda(0).exist: + return tvm.cuda(0).compute_version + + raise ValueError( + "No CUDA architecture was specified or GPU detected." + "Try specifying it by adding '-arch=sm_xx' to your target." + ) + + +def parse_compute_version(compute_version): + """Parse compute capability string to divide major and minor version + + Parameters + ---------- + compute_version : str + compute capability of a GPU (e.g. "6.0") + + Returns + ------- + major : int + major version number + minor : int + minor version number + """ + split_ver = compute_version.split(".") + try: + major = int(split_ver[0]) + minor = int(split_ver[1]) + return major, minor + except (IndexError, ValueError) as err: + # pylint: disable=raise-missing-from + raise RuntimeError("Compute version parsing error: " + str(err)) + + +def have_fp16(compute_version): + """Either fp16 support is provided in the compute capability or not + + Parameters + ---------- + compute_version: str + compute capability of a GPU (e.g. 
"6.0") + """ + major, minor = parse_compute_version(compute_version) + # fp 16 support in reference to: + # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions + if major == 5 and minor == 3: + return True + if major >= 6: + return True + + return False + + +def have_int8(compute_version): + """Either int8 support is provided in the compute capability or not + + Parameters + ---------- + compute_version : str + compute capability of a GPU (e.g. "6.1") + """ + major, _ = parse_compute_version(compute_version) + if major >= 6: + return True + + return False + + +def have_tensorcore(compute_version=None, target=None): + """Either TensorCore support is provided in the compute capability or not + + Parameters + ---------- + compute_version : str, optional + compute capability of a GPU (e.g. "7.0"). + + target : tvm.target.Target, optional + The compilation target, will be used to determine arch if compute_version + isn't specified. + """ + if compute_version is None: + if tvm.cuda(0).exist: + compute_version = tvm.cuda(0).compute_version + else: + if target is None or "arch" not in target.attrs: + warnings.warn( + "Tensorcore will be disabled due to no CUDA architecture specified." + "Try specifying it by adding '-arch=sm_xx' to your target." + ) + return False + compute_version = target.attrs["arch"] + # Compute version will be in the form "sm_{major}{minor}" + major, minor = compute_version.split("_")[1] + compute_version = major + "." 
+ minor + major, _ = parse_compute_version(compute_version) + if major >= 7: + return True + + return False + + +def have_cudagraph(): + """Either CUDA Graph support is provided""" + try: + cuda_ver = get_cuda_version() + if cuda_ver < (10, 0): + return False + return True + except RuntimeError: + return False + + +@tvm._ffi.register_func("tvm.contrib.nvcc.supports_bf16") +def have_bf16(compute_version): + """Either bf16 support is provided in the compute capability or not + + Parameters + ---------- + compute_version : str + compute capability of a GPU (e.g. "8.0") + """ + major, _ = parse_compute_version(compute_version) + if major >= 8: + return True + + return False + + +@tvm._ffi.register_func("tvm.contrib.nvcc.supports_fp8") +def have_fp8(compute_version): + """Whether fp8 support is provided in the specified compute capability or not + + Parameters + ---------- + compute_version : str + GPU capability + """ + major, minor = parse_compute_version(compute_version) + # fp8 is suppored in Ada Lovelace (8.9) or later architectures. + if major == 8 and minor == 9: + return True + if major >= 9: + return True + return False diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/peak.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/peak.py new file mode 100644 index 0000000000000000000000000000000000000000..78dae846d6ca2ba5774747d97bb9f380144a713e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/peak.py @@ -0,0 +1,394 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""measure bandwidth and compute peak""" + +import logging +import tvm +from tvm import te +from tvm.target import Target +from . import utils +from .. import rpc + + +def _convert_to_remote(func, remote): + """convert module function to remote rpc function""" + temp = utils.tempdir() + path_dso = temp.relpath("tmp_func.tar") + func.export_library(path_dso) + + remote.upload(path_dso) + func = remote.load_module("tmp_func.tar") + return func + + +def measure_bandwidth_sum( + total_item, + item_per_thread, + stride, + base_type, + bits, + lanes, + target, + target_host, + remote, + dev, + n_times, +): + """measure memory bandwidth of gpu by product reduction for a given type + + The IR for measurement is + + for each thread + for i in 1..num_per_thread: + y[global_id] = y[global_id] * x[base + i * stride] + + Parameters + ---------- + total_item: int + number of elements in input array + item_per_thread: int + number of elements each thread accumulates + stride: int + stride in memory access + base_type: str + can be "int", "float" + bits: int + can be 16, 32 + lanes: int + lane of the vector type, can be 1, 2, 4, 8, 16 + target: :any:`tvm.target.Target` + the target and option of the compilation. 
+ target_host : str or :any:`tvm.target.Target` + host compilation target + dev: Device + the device of array + remote: tvm.rpc.RPCSession + remote rpc session + n_times: int + number of runs for taking mean + + Returns + ------- + GBPS: float + gigabyte per second + """ + target, target_host = Target.canon_target_and_host(target, target_host) + + n, m = total_item, item_per_thread + n //= lanes + + base_type = str(base_type) + str(bits) + dtype = base_type if lanes == 1 else base_type + "x" + str(lanes) + + k = te.reduce_axis((0, m), name="k") + + x = te.placeholder((n,), dtype=dtype, name="x") + op = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name="sum") + y = te.compute( + (n // m,), lambda i: op(x[i // stride * stride * m + i % stride + k * stride], axis=k) + ) + s = te.create_schedule(y.op) + + yo, yi = s[y].split(y.op.axis[0], target.max_num_threads) + s[y].bind(yo, te.thread_axis("blockIdx.x")) + s[y].bind(yi, te.thread_axis("threadIdx.x")) + s[y].unroll(k) + + try: + func = tvm.build(s, [x, y], target) + + x = tvm.nd.empty((n,), dtype=dtype, device=dev) + y = tvm.nd.empty((n // m,), dtype=dtype, device=dev) + + func = _convert_to_remote(func, remote) + time_f = func.time_evaluator(func.entry_name, dev, number=n_times) + time = time_f(x, y).mean + except tvm._ffi.base.TVMError: + # build error (occur when device does not support half) + return -1 + + return 1.0 * (total_item * bits / 8) / 1e9 / time + + +def measure_bandwidth_all_types( + total_item, item_per_thread, n_times, target, target_host, remote, dev, verbose=True +): + """measure memory bandwidth for all types + + Parameters + ---------- + total_item: int + number of elements in input array + item_per_thread: int + number of elements each thread accmulates + n_times: int + number of runs for averaging + target: :any:`tvm.target.Target` + the target and option of the compilation. 
+ target_host : str or :any:`tvm.target.Target` + host compilation target + remote: tvm.rpc.RPCSession + remote rpc session + dev: Device + the device of array + verbose: bool + whether outputs immediate result + + Returns + ------- + result: list + a list of (type_name, GBPS) pairs + """ + target, target_host = Target.canon_target_and_host(target, target_host) + max_threads = target.max_num_threads + + result = [] + for base_type in ["float"]: + for bits in [32]: + for lanes in [1, 2, 4, 8, 16]: + max_speed = -1e9 + # try different strides + for stride in [max_threads, total_item // (lanes * item_per_thread)]: + speed = measure_bandwidth_sum( + total_item, + item_per_thread, + stride, + base_type, + bits, + lanes, + target, + target_host, + remote, + dev, + n_times, + ) + max_speed = max(max_speed, speed) + type_name = base_type + str(bits) + result.append([f"{type_name}x{lanes}", max_speed]) + if verbose: + logging.info("\t%-10s %.2f GBPS", result[-1][0], result[-1][1]) + return result + + +def measure_compute_mad( + total_item, item_per_thread, base_type, bits, lanes, target, target_host, remote, dev, n_times +): + """measure peak compute speed by computing mad for a type + + The IR for measurement is + + for each thread + for i in 1..item_per_thread + x = mad(x, x, y) + y = mad(y, y, x) + + Parameters + ---------- + total_item: int + number of elements in input array + item_per_thread: int + number of operations each thread does + base_type: str + can be "int", "float" + bits: int + can be 16, 32 + lanes: int + lane of the vector type, can be 1, 2, 4, 8, 16 + target: :any:`tvm.target.Target` + the target and option of the compilation. 
+ target_host : str or :any:`tvm.target.Target` + host compilation target + remote: tvm.rpc.RPCSession + if it is not None, use remote rpc session + dev: Device + the device of array + n_times: int + number of runs for taking mean + + Returns + ------- + GOPS: float + giga operation per second + """ + target, target_host = Target.canon_target_and_host(target, target_host) + + n = total_item + + if bits >= 64 or lanes >= 16: + n //= 2 + + max_threads = target.max_num_threads + + base_type = str(base_type) + str(bits) + dtype = base_type if lanes == 1 else base_type + "x" + str(lanes) + + def extern(ins, outs): + # pylint: disable=unused-argument + """construct measurement function by building IR directly""" + ib = tvm.tir.ir_builder.create() + + bx = te.thread_axis("blockIdx.x") + tx = te.thread_axis("threadIdx.x") + + ib.scope_attr(bx, "thread_extent", n // max_threads) + ib.scope_attr(tx, "thread_extent", max_threads) + + idx = bx.var * max_threads + tx.var + + a = ib.allocate(dtype, (1), name="a", scope="local") + b = ib.allocate(dtype, (1), name="b", scope="local") + + a[0] = outs[0].vload(idx, dtype) + b[0] = outs[0].vload(idx, dtype) + + if base_type.find("float") != -1: + + def mad_func(x, y): + return x * x + y + + else: + + def mad_func(x, y): + return y * y + x + + for _ in range(item_per_thread // 4 // lanes): + a[0] = mad_func(a[0], b[0]) + b[0] = mad_func(b[0], a[0]) + + ib.emit(outs[0].vstore(idx, b[0])) + return ib.get() + + y = te.extern((n,), [], extern, name="y", dtype=dtype) + s = te.create_schedule(y.op) + + try: + func = tvm.build(s, [y], target) + func = _convert_to_remote(func, remote) + time_f = func.time_evaluator(func.entry_name, dev, number=n_times) + y = tvm.nd.empty((n,), dtype=dtype, device=dev) + time = time_f(y).mean + except tvm._ffi.base.TVMError: + # build error (occur when device does not support half) + return -1 + + return 1.0 * (n * item_per_thread) / 1e9 / time + + +def measure_compute_all_types( + total_item, item_per_thread, 
n_times, target, target_host, remote, dev, verbose=True +): + """measure peak flops for all types + + Parameters + ---------- + total_item: int + number of elements in input array + item_per_thread: int + number of elements each thread accmulates + n_times: int + number of runs for averaging + target: :any:`tvm.target.Target` + the target and option of the compilation. + target_host : str or :any:`tvm.target.Target` + host compilation target + remote: tvm.rpc.RPCSession + remote rpc session + dev: Device + the device of array + verbose: bool + whether outputs immediate result + + Returns + ------- + result: list + a list of (type_name, GFLOPS/GIOPS) pairs + """ + target, target_host = Target.canon_target_and_host(target, target_host) + + result = [] + for base_type in ["float", "int"]: + for bits in [16, 32, 64]: + for lanes in [1, 2, 4, 8, 16]: + if base_type == "int" and bits != 32: # only measure int32 + continue + + max_speed = -1e9 + for per_thread in [item_per_thread // 2, item_per_thread, item_per_thread * 2]: + speed = measure_compute_mad( + total_item, + per_thread, + base_type, + bits, + lanes, + target, + target_host, + remote, + dev, + n_times, + ) + max_speed = max(max_speed, speed) + type_name = base_type + str(bits) + result.append([f"{type_name}x{lanes}", max_speed]) + + unit = "GFLOPS" if base_type == "float" else "GIOPS" + + if verbose: + logging.info("\t%-10s %.2f %s", result[-1][0], result[-1][1], unit) + + return result + + +def measure_peak_all(target, target_host, host, port): + """measure memory bandwidth and peak compute for gpu devices + + Parameters + ---------- + target: str or :any:`tvm.target.Target` + target_host: str + host: str + port: int + """ + + target, target_host = Target.canon_target_and_host(target, target_host) + remote = rpc.connect(host, port) + n_times = 20 + + bandwidth_total_item = 1 << 25 + bandwidth_item_per_thread = 32 + + compute_total_item = 1 << 21 + compute_item_per_thread = 4096 + + if 
str(target).startswith("opencl"): + dev = remote.cl() + elif str(target).startswith("cuda"): + dev = remote.cuda() + elif str(target).startswith("metal"): + dev = remote.metal() + else: + raise RuntimeError("Unsupported target") + + logging.info("========== measure memory bandwidth ==========") + measure_bandwidth_all_types( + bandwidth_total_item, bandwidth_item_per_thread, n_times, target, target_host, remote, dev + ) + + logging.info("========== measure peak compute ==========") + measure_compute_all_types( + compute_total_item, compute_item_per_thread, n_times, target, target_host, remote, dev + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pickle_memoize.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pickle_memoize.py new file mode 100644 index 0000000000000000000000000000000000000000..6d2ffbac0673eeb37c3c955531edbf63936b7579 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pickle_memoize.py @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Memoize result of function via pickle, used for cache testcases.""" +# pylint: disable=broad-except,superfluous-parens +import os +import sys +import atexit +from decorator import decorate +from .._ffi.base import string_types + +try: + import cPickle as pickle +except ImportError: + import pickle + + +class Cache(object): + """A cache object for result cache. + + Parameters + ---------- + key: str + The file key to the function + save_at_exit: bool + Whether save the cache to file when the program exits + """ + + cache_by_key = {} + + def __init__(self, key, save_at_exit): + cache_dir = f".pkl_memoize_py{sys.version_info[0]}" + try: + os.mkdir(cache_dir) + except FileExistsError: + pass + else: + self.cache = {} + self.path = os.path.join(cache_dir, key) + if os.path.exists(self.path): + try: + self.cache = pickle.load(open(self.path, "rb")) + except Exception: + self.cache = {} + else: + self.cache = {} + self.dirty = False + self.save_at_exit = save_at_exit + + def save(self): + if self.dirty: + print(f"Save memoize result to {self.path}") + with open(self.path, "wb") as out_file: + pickle.dump(self.cache, out_file, pickle.HIGHEST_PROTOCOL) + + +@atexit.register +def _atexit(): + """Save handler.""" + for value in Cache.cache_by_key.values(): + if value.save_at_exit: + value.save() + + +def memoize(key, save_at_exit=False): + """Memoize the result of function and reuse multiple times. + + Parameters + ---------- + key: str + The unique key to the file + save_at_exit: bool + Whether save the cache to file when the program exits + + Returns + ------- + fmemoize : function + The decorator function to perform memoization. + """ + + def _register(f): + """Registration function""" + allow_types = (string_types, int, float, tuple) + fkey = key + "." 
+ f.__name__ + ".pkl" + if fkey not in Cache.cache_by_key: + Cache.cache_by_key[fkey] = Cache(fkey, save_at_exit) + cache = Cache.cache_by_key[fkey] + cargs = tuple(x.cell_contents for x in f.__closure__) if f.__closure__ else () + cargs = (len(cargs),) + cargs + + def _memoized_f(func, *args, **kwargs): + assert not kwargs, "Only allow positional call" + key = cargs + args + for arg in key: + if isinstance(arg, tuple): + for x in arg: + assert isinstance(x, allow_types) + else: + assert isinstance(arg, allow_types) + if key in cache.cache: + return cache.cache[key] + res = func(*args) + cache.cache[key] = res + cache.dirty = True + return res + + return decorate(f, _memoized_f) + + return _register diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pipeline_executor.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pipeline_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..d6be16653c67841e2ffd4827466514798ea31d91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pipeline_executor.py @@ -0,0 +1,345 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Pipeline executor that executes a series of modules in a pipeline fashion.""" +import json +import os +import time +from tvm import runtime +from tvm._ffi import get_global_func +from tvm.contrib import graph_executor + + +def pipeline_executor_enabled(): + """Check if the pipeline executor is enabled. + + Return + ------- + enable: bool + Return whether the pipeline executor is enabled. + """ + return get_global_func("tvm.pipeline_executor.create", allow_missing=True) is not None + + +class PipelineModule(object): + """Wrapper of runtime module, caller can use this module to set parameters and get outputs. + + Parameters + ---------- + module : Union[PipelineExecutorFactoryModule, Module] + Common interface for pipeline executor factory modules or Module. + """ + + def __init__(self, module): + if isinstance(module, PipelineExecutorFactoryModule): + self.module = module.get_pipeline_executor_module() + else: + self.module = module + # Get the packed functions from the pipeline executor. + self._get_params_group_pipeline_map = self.module["get_params_group_pipeline_map"] + self._run = self.module["run"] + self._set_param = self.module["set_param"] + self._set_input = self.module["set_input"] + self._get_input = self.module["get_input"] + self._get_output = self.module["get_output"] + self._get_num_outputs = self.module["get_num_outputs"] + self._get_num_inputs = self.module["get_num_inputs"] + self._get_input_pipeline_map = self.module["get_input_pipeline_map"] + self._get_pipe_execute_count = self.module["get_execute_count"] + + def run(self): + """Run the pipeline executor.""" + self._run() + + def get_input_pipeline_map(self, name): + """Using the "name" to get the corresponding subgraph index and also get the "input name" + of the corresponding subgraph interface. + Returns + ------- + input map: Array[str] + Returning the index and "input name" of the subgraph. 
+ """ + return self._get_input_pipeline_map(name) + + def get_params_group_pipeline_map(self, name): + """Use the name of the parameters group to get the corresponding runtime module index. + + Parameters + ---------- + name: str + The parameter group name. + + Returns + ------- + module_index: int + The index of the runtime module. + """ + return self._get_params_group_pipeline_map(name) + + def set_input(self, key, value): + """Set the input via input name. + + Parameters + ---------- + key : str + The input name + value : array_like. + The input value + """ + self._set_input(key, value) + + def set_params(self, params_group_name, params_data): + """Set the parameter group value given the parameter group name. Note that the parameter + group name is declared in the pipeline executor config. + + Parameters + ---------- + params_group_name : str + The parameters group name. + + params_data : Dict[str, NDArray] + A map from parameter name to data. + """ + if not params_data: + raise RuntimeError('"params_data is empty!"') + + for key, val in params_data.items(): + self._set_param(params_group_name, key, val) + + def get_input(self, key): + """Get the input via an input name. + Parameters + ---------- + key : str + The input key + Returns + ------- + data : NDArray + The input data. + """ + return self._get_input(key) + + def get_output(self, synchronize=True, sleep_interval=0.001): + """Get the output. + Returns + ------- + data : Array[NDArray] + A list of output data. + synchronize : BOOL + Whether to do a synchronize poll. + sleep_interval : Float32 + When doing the synchronize loop poll, how many seconds the loop should sleep for yield. + """ + outputs = [] + if not synchronize: + outputs = self._get_output() + else: + while not outputs: + outputs = self._get_output() + time.sleep(sleep_interval) + + return outputs + + @property + def num_executing_pipeline(self): + """Getting the count of running pipeline. 
+ Returns + ------- + count : int + The count of running pipeline. + """ + return self._get_pipe_execute_count() + + @property + def num_outputs(self): + """Get the number of outputs. + Returns + ------- + count : int + The number of outputs. + """ + return self._get_num_outputs() + + @property + def num_inputs(self): + """Get the number of inputs + Returns + ------- + count : int + The number of inputs + """ + return self._get_num_inputs() + + @staticmethod + def load_library(config_file_name): + """Import files to create a pipeline executor. + + Parameters + ---------- + config_file_name : str + Path and name of the configuration file, the configuration file contains the + disk path of the parameter file, library file, and JSON file. + """ + with open(config_file_name, "r") as file_handle: + config = file_handle.read() + config = json.loads(config) + if "load_config" not in config or "pipeline_config" not in config: + raise RuntimeError( + f'"load_config" or "pipeline_config" is missing in {config_file_name}' + ) + + # The config file used to load library, prameters, and JSON files. + with open(config["load_config"], "r") as file_handle: + load_config = file_handle.read() + + # The config file used to load pipeline compute config. + with open(config["pipeline_config"], "r") as file_handle: + pipeline_config = file_handle.read() + + # Load a PipelineExecutor from the disk files. + load_library = get_global_func("tvm.pipeline_executor.load", allow_missing=False) + module = load_library(load_config, pipeline_config) + + return PipelineModule(module) + + +class PipelineExecutorFactoryModule(object): + """Common interface for pipeline executor factory modules. + + Parameters + ---------- + pipeline_mods : List[GraphExecutorFactoryModule] + List of GraphExecutorFactoryModule. + + mod_config : Dict[int, Dict[str, Any]] + Modules dependency configuration information. 
+ + """ + + def __init__(self, pipeline_mods, mods_config): + self.pipeline_mods = pipeline_mods + self.mods_config = mods_config + self.module = None + + def get_pipeline_executor_module(self): + """Get the pipeline executor module. + + Returns + ------- + module : Module + Common interface for pipeline executor factory Module. + """ + if not self.module: + graph_executors, config = self.graph_executor_create( + self.pipeline_mods, self.mods_config + ) + self.pipeline_create = get_global_func( + "tvm.pipeline_executor.create", allow_missing=False + ) + self.module = self.pipeline_create(graph_executors, config) + return self.module + + def graph_executor_create(self, pipeline_mods, mod_config): + """Create graph_executor list and return configuration as a json string. + + Parameters + ---------- + pipeline_mods : List[GraphExecutorFactoryModule] + List of GraphExecutorFactoryModule + + mod_config : Dict[str, Any] + Modules dependency configuration information. + + Returns + ------- + mods : List[Module] + The Module list. + + mod_config : str + The Modudle configuration. + """ + # Should store modules in the list named 'mods' in index order. + mods = [None for _ in range(len(pipeline_mods))] + for lib_index in pipeline_mods: + pipeline_lib = pipeline_mods[lib_index]["lib"] + dev = pipeline_mods[lib_index]["dev"] + lib = graph_executor.GraphModule(pipeline_lib["default"](dev)) + # Return a module list sorted by lib_index. + mods[lib_index] = lib.module + + return mods, json.dumps(mod_config) + + def export_library(self, directory_path): + """Export the pipeline executor into disk files. + + Parameters + ---------- + directory_path : str + Export the files to this directory. + """ + if not self.pipeline_mods: + raise RuntimeError("The pipeline executor has not been initialized.") + + # Check if the directory_path exists. + if not os.path.exists(directory_path): + raise RuntimeError("The directory {directory_path} does not exist.") + # Create an load configuration. 
+ load_config_file_name = f"{directory_path}/load_config" + pipeline_config_file_name = f"{directory_path}/pipeline_config" + config = {} + config["load_config"] = load_config_file_name + config["pipeline_config"] = pipeline_config_file_name + load_config = [] + # Export the library, JSON, and parameter into files, then export these files path + # into a configuration file. + for lib_index in self.pipeline_mods: + mconfig = {} + mconfig["mod_idx"] = lib_index + mconfig["lib_name"] = f"{directory_path}/lib{lib_index}.so" + mconfig["json_name"] = f"{directory_path}/json{lib_index}" + mconfig["params_name"] = f"{directory_path}/params{lib_index}" + mconfig["dev"] = ( + f"{self.pipeline_mods[lib_index]['dev'].device_type}," + f"{self.pipeline_mods[lib_index]['dev'].device_id}" + ) + # Get the graph, lib, and parameters from GraphExecutorFactoryModule. + lib = self.pipeline_mods[lib_index]["lib"] + # Export the lib, graph, and parameters to disk. + if self.pipeline_mods[lib_index]["export_cc"]: + lib.export_library( + mconfig["lib_name"], cc=self.pipeline_mods[lib_index]["export_cc"] + ) + else: + lib.export_library(mconfig["lib_name"]) + + with open(mconfig["json_name"], "w") as file_handle: + file_handle.write(lib.graph_json) + with open(mconfig["params_name"], "wb") as file_handle: + file_handle.write(runtime.save_param_dict(lib.params)) + + load_config.append(mconfig) + + with open(load_config_file_name, "w") as file_handle: + json.dump(load_config, file_handle) + + with open(pipeline_config_file_name, "w") as file_handle: + json.dump(self.mods_config, file_handle) + + config_file_name = f"{directory_path}/config" + with open(config_file_name, "w") as file_handle: + json.dump(config, file_handle) + + return config_file_name diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pipeline_executor_build.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pipeline_executor_build.py new file mode 100644 index 
0000000000000000000000000000000000000000..9a16d1b7afaad6bd0d8234514bd9fc1c1a99defa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/pipeline_executor_build.py @@ -0,0 +1,674 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=f-string-without-interpolation +"""Pipeline executor that executes a series of modules in a pipeline fashion.""" +import json +import os +import tvm._ffi +from tvm import relay +from tvm.relay.transform import InferType +from tvm.contrib.pipeline_executor import PipelineExecutorFactoryModule + + +def pipeline_executor_build_enabled(): + """Check if the pipeline executor build is enabled. + + Return + ------- + enable: bool + Return whether the pipeline executor is enabled. + """ + return tvm.contrib.pipeline_executor.pipeline_executor_enabled() + + +def build(pipe_configs): + """Build modules used in the pipeline executor, then use these modules and configuration + to create a pipeline executor. + + Parameters + ---------- + pipe_configs: PipelineConfig + Build Configuration information. + + Returns + ------- + ret: PipelineExecutorFactoryModule + Common interface for pipeline executor factory modules. 
+ """ + libs = {} + config = pipe_configs.get_config() + if "module_connection" not in config: + raise RuntimeError('"module_connection" is missing') + if "input_connection" not in config: + raise RuntimeError('"input_connection" is missing') + if "param_connection" not in config: + raise RuntimeError('"param_connection" is missing') + + mod_n_configs = config["module_connection"] + config_len = len(mod_n_configs) + module_string_config = [{} for _ in range(config_len)] + # Use hardware configurations to build backend modules for each subgraph. + for ir_mod, mod_config in mod_n_configs.items(): + pipe_config = mod_config["pipeline"].copy() + mod_idx = pipe_config["mod_idx"] + dev = mod_config["dev"] + target = mod_config["target"] + build_func = relay.build + # Callers may need to use a customized building function to wrap the pre-building logic + # and the backend building logic. For example, in order to support a backend which only + # can do "int8" computation, the caller may need to merge the "quantization" logic + # into the building logic to creat a customized building function. + if "build" in mod_config and mod_config["build"]: + build_func = mod_config["build"] + + lib = build_func( + ir_mod, + target, + params=mod_config["params"], + target_host=mod_config["target_host"], + mod_name=mod_config["mod_name"], + ) + + pipe_config["dev"] = f"{dev.device_type},{dev.device_id}" + # Use "mod_idx" as the key to create a "module_connection" map which is not only + # for the module index but also for the module connection used to build the pipeline. + module_string_config[mod_idx] = pipe_config + libs[mod_idx] = { + "lib": lib, + "dev": dev, + "fcompile": mod_config["fcompile"], + "export_cc": mod_config["export_cc"], + } + + # Creating a text form configuration to record the "input_connection" and the + # "module_connection" information. 
The "input_connection" is used to record the + # map of global input and subgraph input, and the "module_connection" is used to + # record module dependency. + string_config = {} + string_config["param_connection"] = config["param_connection"] + string_config["input_connection"] = config["input_connection"] + string_config["module_connection"] = module_string_config + + return PipelineExecutorFactoryModule(libs, string_config) + + +def export_library(factory, directory_path): + """Export the pipeline executor into disk files. + + Parameters + ---------- + factory : PipelineExecutorFactoryModule + The pipeline executor factory + directory_path : str + Export the files to this directory. + """ + if not factory.pipeline_mods: + raise RuntimeError("The pipeline executor has not been initialized.") + + # Check if the directory_path exists. + if not directory_path or not os.path.exists(directory_path): + raise RuntimeError("The directory {directory_path} does not exist.") + # Create an load configuration. + load_config_file_name = f"{directory_path}/load_config" + pipeline_config_file_name = f"{directory_path}/pipeline_config" + config = {} + config["load_config"] = load_config_file_name + config["pipeline_config"] = pipeline_config_file_name + load_config = [] + # Export the library, JSON, and parameter into files, then export these files path + # into a configuration file. + for lib_index in factory.pipeline_mods: + mconfig = {} + mconfig["mod_idx"] = lib_index + mconfig["lib_name"] = f"{directory_path}/lib{lib_index}.so" + mconfig["json_name"] = f"{directory_path}/json{lib_index}" + mconfig["params_name"] = f"{directory_path}/params{lib_index}" + lib_config = factory.pipeline_mods[lib_index] + mconfig["dev"] = f"{lib_config['dev'].device_type}," f"{lib_config['dev'].device_id}" + fcompile = lib_config["fcompile"] + if not fcompile: + fcompile = False + + # Get the graph, lib, and parameters from GraphExecutorFactoryModule. 
+ lib = factory.pipeline_mods[lib_index]["lib"] + # Export the lib, graph, and parameters to disk. + lib.export_library(mconfig["lib_name"], fcompile=fcompile) + with open(mconfig["json_name"], "w") as file_handle: + file_handle.write(lib.graph_json) + with open(mconfig["params_name"], "wb") as file_handle: + file_handle.write(relay.save_param_dict(lib.params)) + + load_config.append(mconfig) + + with open(load_config_file_name, "w") as file_handle: + json.dump(load_config, file_handle) + + with open(pipeline_config_file_name, "w") as file_handle: + json.dump(factory.mods_config, file_handle) + + config_file_name = f"{directory_path}/config" + with open(config_file_name, "w") as file_handle: + json.dump(config, file_handle) + + return config_file_name + + +class PipelineConfig(object): + """Pipeline configuration information, this class contains the DAG that expresses + the dependency of each module involved in a pipeline and the parameters for building + each module. + """ + + class Binding: + """This class defines the module connections information. + The type can only be "input" or "output". + + Parameters + ---------- + owner : ModuleWrapper + The class who owns this interface. + + io_type : str + The I/O type of this interface. It can only be "input" or "output". + + name : str/integer + Name, for input it is string such as "data0", for output it is an integer such as 0. + + data_type: TensorType + The data type of this interface. + """ + + def __init__(self, owner, io_type, name, data_type=None): + self.io_owner = owner + self.io_type = io_type + self.name = str(name) + # Child interfaces that depend on this interface. + self.bindings = [] + # Parents interfaces that this interface depend on. + self.parents = [] + + self.data_type = data_type + + def get_name(self): + # Return name of this interface and the name of owner who owns this interface. 
+ owner_name = "" + if isinstance(self.io_owner, PipelineConfig.ModuleWrapper): + owner_name = self.io_owner.name + + return owner_name, self.name + + def get_owner_idx(self): + # If the owner is ModuleWrapper return the owner index, if not return 0. + if isinstance(self.io_owner, PipelineConfig.ModuleWrapper): + return self.io_owner.idx + + return -1 + + def is_pipeline_executor_interface(self): + """The pipeline interface is used to interact with the caller. There are two types + of interfaces, one is 'input' another is 'output'. The pipeline input interface + is responsible for passing parameters to the internal module interface, and the + pipeline output interface is responsible for outputting the results computed by + the pipeline executor to the caller. + """ + return not isinstance(self.io_owner, PipelineConfig.ModuleWrapper) + + def __repr__(self): + # Geting the binding information in the form of text. + str_format = f" |{self.name}: " + for binding in self.bindings: + mname, dname = binding.get_name() + str_format += f"{mname}:{dname} " + + return str_format + + def check_binding_dict(self, connection_dict): + """Checking the binding dictionary. + Parameter + --------- + connection_dict : Dict[str, Any] + It is a dictionary of module connections. + """ + if "interface_name" not in connection_dict: + raise RuntimeError('"inteface_name" is missing in global config!"') + if "connection" not in connection_dict: + raise RuntimeError(f'"connection" is missing!"') + # The global interface mapping should be one-to-one. + if not connection_dict["connection"]: + raise RuntimeError("The global interface map is empty!") + if len(connection_dict["connection"]) > 1: + raise RuntimeError("A global interface maps multiple module interfaces!") + if "mod_idx" not in connection_dict["connection"][0]: + raise RuntimeError('"mod_idx" is missing!') + + def get_binding_dict(self): + """Returning the binding information in the form of dictionary. 
+ Returns + ------- + data : Dict[str, Any] + The binding information is in the form of dictionary. + """ + dict_format = {"interface_name": self.name, "connection": []} + for binding in self.bindings: + _, dname = binding.get_name() + midx = binding.get_owner_idx() + dict_format["connection"].append({"mod_idx": midx, "interface_name": dname}) + + self.check_binding_dict(dict_format) + return dict_format + + def check_dag_acyclic(self, start, inputs): + """This is to check whether the DAG containing these input interfaces is acyclic. + Parameters + ---------- + start: ModuleWrapper + The starting node of the cycle check algorithm. + + inputs: Binding + These interfaces are used to connect to each other to build DAG. + + Return + ------ + Return true if there is no cycle in the DAG. + """ + for binding in inputs.values(): + if start == binding.io_owner: + return False + for p in binding.parents: + if not self.check_dag_acyclic(start, p.io_owner.input_bindings.bindings): + return False + + return True + + def connect(self, binding): + """Connect the current interface to the destination interface. + Correct connections are as follows: 1. the pipeline input connected to a module input, + 2. the module output connected to a pipeline output, 3. the module output connected to + a module input. + + Parameters + ---------- + binding: Binding + The destination of this connection. + """ + + # Check whether the binding setting is correct or not. + if self.io_owner == binding.io_owner: + raise RuntimeError("Can not bind itself.") + + if self.io_type == "param" and not self.is_pipeline_executor_interface(): + raise RuntimeError( + 'The "param" binding can only be used by a pipeline executor interface!' 
+ ) + + if not self.is_pipeline_executor_interface() and self.io_type == "input": + raise RuntimeError("Module can only bind from output interface!") + + if self.io_type == "param" and binding.io_type != "param": + raise RuntimeError( + 'A global "param" interface can only be bind with a module "param" interface!' + ) + + if ( + not self.is_pipeline_executor_interface() + and not binding.is_pipeline_executor_interface() + and binding.io_type == "output" + ): + raise RuntimeError("Can not bind module output with another module output!") + + if ( + not self.is_pipeline_executor_interface() + and binding.is_pipeline_executor_interface() + and binding.io_type == "input" + ): + raise RuntimeError("Can not bind module output with pipeline input!") + + if self.is_pipeline_executor_interface() and self.io_type == "output": + raise RuntimeError("Global output can not be used as binding start point.") + + if ( + self.is_pipeline_executor_interface() + and self.io_type == "input" + and binding.io_type != "input" + ): + raise RuntimeError("Global input can only bind with module input.") + + self.bindings.append(binding) + if not self.is_pipeline_executor_interface(): + # Check whether the data types of the source and destination are the same. + if ( + isinstance(binding.io_owner, PipelineConfig.ModuleWrapper) + and self.data_type != binding.data_type + ): + raise RuntimeError( + f"Illegal type (%s vs. %s): binding type is not same!" + % (self.data_type, binding.data_type) + ) + + binding.parents.append(self) + + # Do acyclic check after increasing the in-degree of child node by setting + # current interface as a parent of the child node. + + if not self.check_dag_acyclic( + binding.io_owner, self.io_owner.input_bindings.bindings + ): + raise RuntimeError("Illegal connection: Cause a cycle!") + + class BindingList: + """Container for bindings(input or output interface). 
+ + Parameters + ---------- + owner : ModuleWrapper/PipelineConfig + The owner of this class can be ModuleWrapper or PipelineConfig. + + io_type : str + The type of this class can be "input" or "output". + """ + + def __init__(self, owner, io_type): + self.bindings = {} + self.io_owner = owner + self.binding_type = io_type + + def get_binding_data_type(self, key): + if isinstance(self.io_owner, PipelineConfig.ModuleWrapper): + return self.io_owner.get_data_type(key, self.binding_type) + return None + + def __getitem__(self, key): + if key not in self.bindings: + data_type = self.get_binding_data_type(key) + if not data_type and isinstance(self.io_owner, PipelineConfig.ModuleWrapper): + raise RuntimeError(f"Can not find {key} in binding list {self.binding_type}.") + + self.bindings[key] = PipelineConfig.Binding( + self.io_owner, self.binding_type, key, data_type + ) + + return self.bindings[key] + + class ModuleWrapper: + """This class is a wrapper representing the module and contains information such as + module information, binding information and building information. 
+ """ + + def __init__(self, mod=None): + self.target_host = None + self.build_func = None + self.params = None + self.target = None + self.fcompile = None + self.name = None + self.dev = None + self.export_cc = None + self.cpu_affinity = "" + self.idx = None + self.mod = mod + self.input_params = InferType()(mod)["main"].params + self.output_type = InferType()(mod)["main"].checked_type.ret_type + self.input_bindings = PipelineConfig.BindingList(self, "input") + self.output_bindings = PipelineConfig.BindingList(self, "output") + self.param_binding = PipelineConfig.Binding(self, "param", "param") + + def __eq__(self, other): + if isinstance(other, PipelineConfig.ModuleWrapper): + return self.mod == other.mod + + return False + + def __getitem__(self, key): + if isinstance(key, str): + if key == "input": + return self.input_bindings + + if key == "output": + return self.output_bindings + + if key == "param": + return self.param_binding + + raise RuntimeError(f"{key} not found!") + + raise RuntimeError('The data type of "key" is not supported!') + + def get_data_type(self, key, interface_type): + """Get the module interface data type according to the key value and interface type. + Parameters + ---------- + key: str + The interface name. + + interface_type: + The interface type. + + Return + ------- + Return data type. + """ + if interface_type == "input": + for param in self.input_params: + if param.name_hint == key: + return param._checked_type_ + + if interface_type == "output": + if isinstance(self.output_type, tvm.ir.type.TupleType): + if int(key) < len(self.output_type.fields): + return self.output_type.fields[int(key)] + elif int(key) == 0: + return self.output_type + + return None + + def set_idx_name(self, idx): + # Set the index value and generate the module name. + self.idx = idx + self.name = f"mod{str(idx)}" + + def is_root_mod(self): + """Check whether this node is the root node in DAG, this function is used + in topological sort. 
+ """ + return all([not b.parents for b in self.input_bindings.bindings.values()]) + + def remove_self_from_bindings(self): + """Remove the current node from child dependencies to reduce the in-degree + of child node, this function is used in topological sort. + """ + for binding in self.output_bindings.bindings.values(): + for child in binding.bindings: + if binding in child.parents: + child.parents.remove(binding) + + def __init__(self): + self.mod_wrapper = {} + self.input_bindings = self.BindingList(self, "input") + self.output_bindings = self.BindingList(self, "output") + # There is a map of global parameters group and module index. + self.param_group_bindings = self.BindingList(self, "param") + + def __str__(self): + # Get configuration information as a string. + + # Use topological sort to get correct module order. + self.dag_topology_sort() + # Getting the parameters dependencies. + param_dump = "Params\n" + for param_name in self.param_group_bindings.bindings: + inf = self.param_group_bindings.bindings[param_name] + param_dump += str(inf) + "\n" + # Get the input dependencies. + input_dump = "\nInputs\n" + for input_name in self.input_bindings.bindings: + inf = self.input_bindings.bindings[input_name] + input_dump += str(inf) + "\n" + + # Get the connections information of each module. + output = {} + connections_dump = "\nconnections\n" + for mod in self.mod_wrapper: + for interface in self.mod_wrapper[mod].output_bindings.bindings.values(): + if interface.bindings: + mname, dname = interface.get_name() + iname = mname + ".output(" + dname + ")->" + for dep in interface.bindings: + dep_mname, dep_dname = dep.get_name() + if isinstance(dep.io_owner, PipelineConfig.ModuleWrapper): + iname += f" {dep_mname}.{dep_dname}" + connections_dump += f" |{iname}\n" + else: + output[dep_dname] = f"{mname}.output({dname})" + + # Get the output dependencies. 
+ output_dump = "\noutput\n" + for name in sorted(output.keys()): + output_dump += f" |output({name}) : {output[name]}\n" + + return param_dump + input_dump + output_dump + connections_dump + + def __getitem__(self, key): + if isinstance(key, tvm.ir.module.IRModule): + if key not in self.mod_wrapper: + self.mod_wrapper[key] = self.ModuleWrapper(key) + return self.mod_wrapper[key] + + if isinstance(key, str): + if key == "input": + return self.input_bindings + if key == "output": + return self.output_bindings + if key == "param_group": + return self.param_group_bindings + + raise RuntimeError(f"{key} not found!") + + raise RuntimeError(f'The key type "{type(key)}" is not supported!') + + def get_config(self): + """Get the configuration information in dictionary form, this configuration + will be used to create pipeline executor. + """ + + # Use topological sort to get the correct order of modules. + self.dag_topology_sort() + mconfig = {} + module_connection = {} + for mod in self.mod_wrapper: + # Generate pipeline configuration. + mconf = {} + output_conf = [] + module = self.mod_wrapper[mod] + for _, binding in module.output_bindings.bindings.items(): + dep_conf = [] + output = {} + if binding.bindings: + for dep in binding.bindings: + dep_item = {} + _, dname = dep.get_name() + if dep.is_pipeline_executor_interface(): + dep_item["global_output_index"] = int(dname) + else: + dep_item["mod_idx"] = dep.get_owner_idx() + dep_item["input_name"] = dname + dep_conf.append(dep_item) + + # The value of output_idx start from 0. 
+ output["output_idx"] = int(binding.name) + output["dependencies"] = dep_conf + output_conf.append(output) + + mconf["mod_idx"] = module.idx + mconf["cpu_affinity"] = module.cpu_affinity + mconf["output"] = output_conf + + module_connection[mod] = { + "pipeline": mconf, + "target_host": module.target_host, + "mod_name": "default", + "build": module.build_func, + "params": module.params, + "target": module.target, + "fcompile": module.fcompile, + "dev": module.dev, + "export_cc": module.export_cc, + } + + # Creating a map including pipeline inputs and subgraph inputs. + input_connection = [] + for input_name in self.input_bindings.bindings: + input_dict = self.input_bindings.bindings[input_name].get_binding_dict() + if "interface_name" not in input_dict["connection"][0]: + raise RuntimeError("interface_name is missing in connection config!") + # Creating the map including global interfaces and subgraph interfaces. + input_map = { + "global_interface_name": input_dict["interface_name"], + "mod_idx": input_dict["connection"][0]["mod_idx"], + "module_interface_name": input_dict["connection"][0]["interface_name"], + } + input_connection.append(input_map) + + # Create a map including global parameters groups and modules. 
+ param_connection = [] + for param_name in self.param_group_bindings.bindings: + param_dict = self.param_group_bindings.bindings[param_name].get_binding_dict() + param_map = { + "global_param_name": param_dict["interface_name"], + "mod_idx": param_dict["connection"][0]["mod_idx"], + } + param_connection.append(param_map) + + mconfig["module_connection"] = module_connection + mconfig["input_connection"] = input_connection + mconfig["param_connection"] = param_connection + return mconfig + + def dag_topology_sort(self): + """Use topological sort to get order of pipeline modules.""" + mlist = [] + mod_wrapper = self.mod_wrapper.copy() + while mod_wrapper: + temp_list = [] + for mod, wrapper in mod_wrapper.items(): + if wrapper.is_root_mod(): + temp_list.append(mod) + wrapper.remove_self_from_bindings() + + for mod in temp_list: + mod_wrapper.pop(mod, None) + + mlist += temp_list + + mod_wrapper_sort = {} + for mod, i in zip(mlist, range(len(mlist))): + self.mod_wrapper[mod].set_idx_name(i) + mod_wrapper_sort[mod] = self.mod_wrapper[mod] + + self.mod_wrapper = mod_wrapper_sort + + def get_mod_idx(self, mod): + # Return the module index. + idx = self.mod_wrapper[mod].idx + return idx + + def pipe_input(self, name): + # Return the input interface according to the name. + return self.input_bindings[name] + + def pipe_output(self, idx): + # Return the output interface according to the name. + return self.output_bindings[idx] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/popen_pool.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/popen_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..d16cf31bc7bf06033207e6a2f4a8a78b40e8724d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/popen_pool.py @@ -0,0 +1,452 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Multiprocessing via Popen. + +This module provides a multi-processing pool backed by Popen. +with additional timeout support. +""" +import os +import sys +import struct +import threading +import subprocess +import concurrent.futures +from enum import IntEnum +from collections import namedtuple +import pickle + + +def kill_child_processes(pid): + """Kill all child processes recursively for a given pid. + + Parameters + ---------- + pid : int + The given parameter id. + """ + # pylint: disable=import-outside-toplevel + import psutil + + try: + parent = psutil.Process(pid) + children = parent.children(recursive=True) + except psutil.NoSuchProcess: + return + + for process in children: + try: + process.kill() + except psutil.NoSuchProcess: + pass + + +class StatusKind(IntEnum): + """Running and return value status.""" + + RUNNING = 0 + COMPLETE = 1 + EXCEPTION = 2 + TIMEOUT = 3 + + +class MapResult(namedtuple("MapResult", ["status", "value"])): + """Result of map_with_error_catching. + + Parameters + ---------- + status : StatusKind + The status of the result. + + value : Any + The result value. + """ + + __slots__ = [] + + +class PopenWorker: + """A subprocess worker via Popen. 
+ + PopenWorker provides a low-level + API to interact with a separate process via Popen. + + Parameters + ---------- + initializer: callable or None + A callable initializer, or None + + initargs: Tuple[object] + A tuple of args for the initializer + + maximum_uses: Optional[int] + The maximum number of times a process can be used before being recycled, + i.e. killed and restarted. If `None`, the process will be reused until + an operation times out. + + stdout: Union[None, int, IO[Any]] + The standard output streams handler specified for the popen process. + + stderr: Union[None, int, IO[Any]] + The standard error streams handler specified for the popen process. + """ + + def __init__(self, initializer=None, initargs=(), maximum_uses=None, stdout=None, stderr=None): + self._proc = None + self._initializer = initializer + self._initargs = initargs + self._maximum_uses = maximum_uses + self._remaining_uses = None + self._stdout = stdout + self._stderr = stderr + + if self._initializer is not None and not callable(self._initializer): + raise TypeError("initializer must be callable for PopenWorker") + + def __del__(self): + try: + self.kill() + except ImportError: + pass + + def kill(self): + """Kill the current running process and cleanup. + + Note + ---- + The worker can start a new process when send is called again. 
+ """ + if self._proc is not None: + # allow gracefully shutdown + try: + self._writer.close() + except IOError: + pass + try: + self._reader.close() + except IOError: + pass + # kill all child processes recursively + try: + kill_child_processes(self._proc.pid) + except TypeError: + pass + try: + self._proc.kill() + except OSError: + pass + + # Join the child process to avoid zombie processes + self.join(timeout=1.0) + self._proc = None + self._remaining_uses = None + + def _start(self): + """Start a new subprocess if nothing is available""" + if self._proc is not None: + return + + # connect subprocess with a pair of pipes + main_read, worker_write = os.pipe() + worker_read, main_write = os.pipe() + + cmd = [sys.executable, "-m", "tvm.exec.popen_worker"] + if sys.platform == "win32": + # pylint: disable=import-outside-toplevel + import msvcrt + + worker_read_handle = msvcrt.get_osfhandle(worker_read) + worker_write_handle = msvcrt.get_osfhandle(worker_write) + os.set_handle_inheritable(worker_read_handle, True) + os.set_handle_inheritable(worker_write_handle, True) + cmd += [str(worker_read_handle), str(worker_write_handle)] + self._proc = subprocess.Popen( + cmd, close_fds=False, stdout=self._stdout, stderr=self._stderr + ) + else: + cmd += [str(worker_read), str(worker_write)] + self._proc = subprocess.Popen( + cmd, pass_fds=(worker_read, worker_write), stdout=self._stdout, stderr=self._stderr + ) + + # close worker side of the pipe + os.close(worker_read) + os.close(worker_write) + self._reader = os.fdopen(main_read, "rb") + self._writer = os.fdopen(main_write, "wb") + + def join(self, timeout=None): + """Join the current process worker before it terminates. + + Parameters + ---------- + timeout: Optional[number] + Timeout value, block at most timeout seconds if it + is a positive number. 
+ """ + if self._proc: + try: + self._proc.wait(timeout) + except subprocess.TimeoutExpired: + pass + + def is_alive(self): + """Check if the process is alive""" + if self._proc: + return self._proc.poll() is None + return False + + def send(self, fn, args=(), kwargs=None, timeout=None): + """Send a new function task fn(*args, **kwargs) to the subprocess. + + Parameters + ---------- + fn : function + The function to be invoked. + + args : list + Positional argument. + + kwargs : dict + Keyword arguments + + timeout : float + Timeout value when executing the function + + Note + ---- + The caller must call recv before calling the next send in + order to make sure the timeout and child process exit + won't affect the later requests. + """ + # use cloud pickle + # pylint: disable=import-outside-toplevel + import cloudpickle + + if self._proc is not None and self._maximum_uses and self._remaining_uses == 0: + # Time to recycle the process. + self.kill() + + if self._proc is None: + self._start() + # init + if self._initializer is not None: + self.send(self._initializer, self._initargs) + self.recv() + + # N.B. The initializer doesn't count as a "use" + self._remaining_uses = self._maximum_uses + kwargs = {} if not kwargs else kwargs + data = cloudpickle.dumps((fn, args, kwargs, timeout), protocol=pickle.HIGHEST_PROTOCOL) + try: + self._writer.write(struct.pack(" MapResult: + # pylint: disable=broad-except + try: + return MapResult(status=StatusKind.COMPLETE, value=self._worker_run(fn, args, kwargs)) + except TimeoutError as exception: + return MapResult(status=StatusKind.TIMEOUT, value=exception) + except Exception as exception: + return MapResult(status=StatusKind.EXCEPTION, value=exception) + + def submit(self, fn, *args, **kwargs) -> concurrent.futures.Future: + """Submit a new function job to the pool + + Parameters + ---------- + fn : function + The function to be invoked. + + args : list + Positional argument. 
+ + kwargs : dict + Keyword arguments + + Returns + ------- + future : concurrent.futures.Future + A future that can be used to access the result. + """ + # pylint: disable=unnecessary-lambda + worker = lambda *args: self._worker_run(*args) + return self._threadpool.submit(worker, fn, args, kwargs) + + def map_with_error_catching(self, fn, iterator): + """Same as map, but catches exceptions and return them instead. + + Parameters + ---------- + fn : function + The function to be invoked. + + iterator : Iterator + Input iterator. + + Returns + ------- + out_iter : Iterator[MapResult] + The result iterator. + """ + worker = lambda x: self._worker_run_with_error_catching(fn, (x,), None) + return self._threadpool.map(worker, iterator) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/random.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/random.py new file mode 100644 index 0000000000000000000000000000000000000000..bbc74fccac94b8b917224cd18949214463aa14a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/random.py @@ -0,0 +1,115 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""External function interface to random library.""" +import tvm +from tvm import te +import tvm._ffi + + +def randint(low, high, size, dtype="int32"): + """Return random integers from low (inclusive) to high (exclusive). + Return random integers from the "discrete uniform" distribution of the + specified dtype in the "half-open" interval [low, high). + + Parameters + ---------- + low : int + Lowest (signed) integer to be drawn from the distribution + high : int + One above the largest (signed) integer to be drawn from the distribution + + Returns + ------- + out : Tensor + A tensor with specified size and dtype + """ + assert "int" in dtype, "the type of randint output must be int or uint" + return te.extern( + size, + [], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.random.randint", int(low), int(high), outs[0] + ), + dtype=dtype, + ) + + +def uniform(low, high, size): + """Draw samples from a uniform distribution. + + Samples are uniformly distributed over the half-open interval [low, high) + (includes low, but excludes high). In other words, any value within the + given interval is equally likely to be drawn by uniform. + + Parameters + ---------- + low : float + Lower boundary of the output interval. All values generated will be + greater than or equal to low. + high : float + Upper boundary of the output interval. All values generated will be + less than high. + size : tuple of ints + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k + samples are drawn. + + Returns + ------- + out : Tensor + A tensor with specified size and dtype. + """ + return te.extern( + size, + [], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.random.uniform", float(low), float(high), outs[0] + ), + dtype="float32", + ) + + +def normal(loc, scale, size): + """Draw samples from a normal distribution. + + Return random samples from a normal distribution. + + Parameters + ---------- + loc : float + loc of the distribution. 
+ scale : float + Standard deviation of the distribution. + size : tuple of ints + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k + samples are drawn. + + Returns + ------ + out : Tensor + A tensor with specified size and dtype + """ + return te.extern( + size, + [], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.random.normal", float(loc), float(scale), outs[0] + ), + dtype="float32", + ) + + +tvm._ffi._init_api("tvm.contrib.random") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rocblas.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rocblas.py new file mode 100644 index 0000000000000000000000000000000000000000..70791dca3152d5e92965894aadfd8b6e63513355 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rocblas.py @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""External function interface to rocBLAS libraries.""" +import tvm +from tvm import te + + +def matmul(lhs, rhs, transa=False, transb=False): + """Create an extern op that compute matrix mult of A and rhs with rocBLAS + + Parameters + ---------- + lhs : Tensor + The left matrix operand + rhs : Tensor + The right matrix operand + transa : bool + Whether transpose lhs + transb : bool + Whether transpose rhs + + Returns + ------- + C : Tensor + The result tensor. + """ + n = lhs.shape[1] if transa else lhs.shape[0] + m = rhs.shape[0] if transb else rhs.shape[1] + return te.extern( + (n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.rocblas.matmul", ins[0], ins[1], outs[0], transa, transb + ), + name="C", + ) + + +def batch_matmul(lhs, rhs, transa=False, transb=False): + """Create an extern op that compute matrix mult of A and rhs with rocBLAS + + Parameters + ---------- + lhs : Tensor + The left batched matrix operand + rhs : Tensor + The right batched matrix operand + transa : bool + Whether transpose lhs + transb : bool + Whether transpose rhs + + Returns + ------- + C : Tensor + The result tensor. 
+ """ + batch_size = lhs.shape[0] + assert batch_size == rhs.shape[0] + n = lhs.shape[2] if transa else lhs.shape[1] + m = rhs.shape[1] if transb else rhs.shape[2] + return te.extern( + (batch_size, n, m), + [lhs, rhs], + lambda ins, outs: tvm.tir.call_packed( + "tvm.contrib.rocblas.batch_matmul", ins[0], ins[1], outs[0], transa, transb + ), + name="C", + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rocm.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rocm.py new file mode 100644 index 0000000000000000000000000000000000000000..119a2c588c991edb61859ff47dcca364c022715f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rocm.py @@ -0,0 +1,284 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Utility for ROCm backend""" +import re +import subprocess +import os +from os.path import join, exists + +import tvm._ffi +from tvm._ffi.base import py_str +import tvm.runtime +import tvm.target + +from . import utils + + +def find_lld(required=True): + """Find ld.lld in system. + + Parameters + ---------- + required : bool + Whether it is required, + runtime error will be raised if the compiler is required. 
+ + Returns + ------- + valid_list : list of str + List of possible paths. + + Note + ---- + This function will first search ld.lld that + matches the major llvm version that built with tvm + """ + lld_list = [] + major = tvm.target.codegen.llvm_version_major(allow_none=True) + if major is not None: + lld_list += [f"ld.lld-{major}.0"] + lld_list += [f"ld.lld-{major}"] + lld_list += ["ld.lld"] + lld_list += [f"/opt/rocm/llvm/bin/{x}" for x in lld_list] + valid_list = [utils.which(x) for x in lld_list] + valid_list = [x for x in valid_list if x] + if not valid_list and required: + raise RuntimeError("cannot find ld.lld, candidates are: " + str(lld_list)) + return valid_list + + +def rocm_link(in_file, out_file, lld=None): + """Link relocatable ELF object to shared ELF object using lld + + Parameters + ---------- + in_file : str + Input file name (relocatable ELF object file) + + out_file : str + Output file name (shared ELF object file) + + lld : str, optional + The lld linker, if not specified, + we will try to guess the matched clang version. + """ + + # if our result has undefined symbols, it will fail to load + # (hipModuleLoad/hipModuleLoadData), but with a somewhat opaque message + # so we have ld.lld check this here. + # If you get a complaint about missing symbols you might want to check the + # list of bitcode files below. 
+ args = [ + lld if lld is not None else find_lld()[0], + "--no-undefined", + "-shared", + in_file, + "-o", + out_file, + ] + proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Linking error using ld.lld:\n" + msg += py_str(out) + raise RuntimeError(msg) + + +@tvm._ffi.register_func("tvm_callback_rocm_link") +def callback_rocm_link(obj_bin): + """Links object file generated from LLVM to HSA Code Object + + Parameters + ---------- + obj_bin : bytearray + The object file + + Return + ------ + cobj_bin : bytearray + The HSA Code Object + """ + tmp_dir = utils.tempdir() + tmp_obj = tmp_dir.relpath("rocm_kernel.o") + tmp_cobj = tmp_dir.relpath("rocm_kernel.co") + with open(tmp_obj, "wb") as out_file: + out_file.write(bytes(obj_bin)) + rocm_link(tmp_obj, tmp_cobj) + cobj_bin = bytearray(open(tmp_cobj, "rb").read()) + return cobj_bin + + +@tvm._ffi.register_func("tvm_callback_rocm_bitcode_path") +def callback_rocm_bitcode_path(rocdl_dir=None): + """Utility function to find ROCm device library bitcodes + + Parameters + ---------- + rocdl_dir : str + The path to rocm library directory + The default value is the standard location + """ + # seems link order matters. 
+ + if rocdl_dir is None: + if exists("/opt/rocm/amdgcn/bitcode/"): + rocdl_dir = "/opt/rocm/amdgcn/bitcode/" # starting with rocm 3.9 + else: + rocdl_dir = "/opt/rocm/lib/" # until rocm 3.8 + + bitcode_names = [ + "oclc_daz_opt_on", + "ocml", + "irif", # this does not exist in rocm 3.9, drop eventually + "oclc_correctly_rounded_sqrt_off", + "oclc_correctly_rounded_sqrt_on", + "oclc_daz_opt_off", + "oclc_finite_only_off", + "oclc_finite_only_on", + # todo (t-vi): an alternative might be to scan for the + "oclc_isa_version_803", + "oclc_isa_version_900", # isa version files (if the linker throws out + "oclc_isa_version_906", # the unneeded ones or we filter for the arch we need) + "oclc_isa_version_1030", + "oclc_unsafe_math_off", + "oclc_unsafe_math_on", + "oclc_wavefrontsize64_on", + "oclc_abi_version_500", + ] + + bitcode_files = [] + for n in bitcode_names: + p = join(rocdl_dir, n + ".bc") # rocm >= 3.9 + if not exists(p): # rocm <= 3.8 + p = join(rocdl_dir, n + ".amdgcn.bc") + if exists(p): + bitcode_files.append(p) + elif "isa_version" not in n and n not in {"irif"}: + raise RuntimeError("could not find bitcode " + n) + + return tvm.runtime.convert(bitcode_files) + + +def parse_compute_version(compute_version): + """Parse compute capability string to divide major and minor version + + Parameters + ---------- + compute_version : str + compute capability of a GPU (e.g. "6.0") + + Returns + ------- + major : int + major version number + minor : int + minor version number + """ + split_ver = compute_version.split(".") + try: + major = int(split_ver[0]) + minor = int(split_ver[1]) + return major, minor + except (IndexError, ValueError) as err: + # pylint: disable=raise-missing-from + raise RuntimeError("Compute version parsing error: " + str(err)) + + +def have_matrixcore(compute_version=None): + """Either MatrixCore support is provided in the compute capability or not + + Parameters + ---------- + compute_version : str, optional + compute capability of a GPU (e.g. 
"7.0"). + + Returns + ------- + have_matrixcore : bool + True if MatrixCore support is provided, False otherwise + """ + if compute_version is None: + if tvm.rocm(0).exist: + compute_version = tvm.rocm(0).compute_version + else: + raise RuntimeError("No ROCm runtime found") + major, _ = parse_compute_version(compute_version) + # matrix core first introduced in 8.0 + if major >= 8: + return True + + return False + + +@tvm._ffi.register_func("tvm_callback_rocm_get_arch") +def get_rocm_arch(rocm_path="/opt/rocm"): + """Utility function to get the AMD GPU architecture + + Parameters + ---------- + rocm_path : str + The path to rocm installation directory + + Returns + ------- + gpu_arch : str + The AMD GPU architecture + """ + gpu_arch = "gfx900" + # check if rocm is installed + if not os.path.exists(rocm_path): + print("ROCm not detected, using default gfx900") + return gpu_arch + try: + # Execute rocminfo command + rocminfo_output = subprocess.check_output([f"{rocm_path}/bin/rocminfo"]).decode("utf-8") + + # Use regex to match the "Name" field + match = re.search(r"Name:\s+(gfx\d+[a-zA-Z]*)", rocminfo_output) + if match: + gpu_arch = match.group(1) + return gpu_arch + except subprocess.CalledProcessError: + print( + f"Unable to execute rocminfo command, \ + please ensure ROCm is installed and you have an AMD GPU on your system.\ + using default {gpu_arch}." + ) + return gpu_arch + + +def find_rocm_path(): + """Utility function to find ROCm path + + Returns + ------- + path : str + Path to ROCm root. 
+ """ + if "ROCM_PATH" in os.environ: + return os.environ["ROCM_PATH"] + cmd = ["which", "hipcc"] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + out = out.decode("utf-8").strip() + if proc.returncode == 0: + return os.path.realpath(os.path.join(out, "../..")) + rocm_path = "/opt/rocm" + if os.path.exists(os.path.join(rocm_path, "bin/hipcc")): + return rocm_path + raise RuntimeError("Cannot find ROCm path") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rpc.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rpc.py new file mode 100644 index 0000000000000000000000000000000000000000..acdd3df88d5445376f4e4d7d5ae2c1d17c86d790 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/rpc.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Deprecation RPC module""" +# pylint: disable=unused-import +from __future__ import absolute_import as _abs +import warnings +from ..rpc import Server, RPCSession, LocalSession, TrackerSession, connect, connect_tracker + +warnings.warn( + "Please use tvm.rpc instead of tvm.conrtib.rpc. 
tvm.contrib.rpc is going to be removed in 0.5", + DeprecationWarning, +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/sdaccel.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/sdaccel.py new file mode 100644 index 0000000000000000000000000000000000000000..478436e3d5c7d45a7e114bf106a4394004b074e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/sdaccel.py @@ -0,0 +1,99 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Utility for Interacting with SDAccel Tools""" +import os +import subprocess + +import tvm._ffi + +from . import utils + + +@tvm._ffi.register_func("tvm_callback_sdaccel_compile") +def compile_vhls(kernel_info, target): + """Compile Vivado HLS code for SDAccel. + + Parameters + ---------- + kernel_info : list of (str, str) + List of kernel information. The kernel information is a tuple of + function name and source code. 
+ + target : tvm.target.Target + The compilation target + + Return + ------ + xclbin : bytearray + The bytearray of the xclbin + """ + device_name = target.attrs.get("device", "") + tmp_dir = utils.tempdir() + + sdk = os.environ.get("XILINX_SDX", None) + xocc = os.path.join(sdk, "bin/xocc") if sdk else "xocc" + target = os.environ.get( + "XCL_TARGET", "sw_emu" if os.environ.get("XCL_EMULATION_MODE") else "hw" + ) + advanced_params = [ + "--xp", + "param:compiler.preserveHlsOutput=1", + "--xp", + "param:compiler.generateExtraRunData=true", + ] + platform = device_name + if not platform: + platform = os.environ.get("XCL_PLATFORM", os.environ.get("AWS_PLATFORM")) + + if platform is None: + raise RuntimeError("No Xilinx device specified.") + + tmp_xo_files = [] + for funcname, code in kernel_info: + funcname = funcname.value + code = code.value + + tmp_cpp = tmp_dir.relpath(funcname + ".cpp") + tmp_xo = tmp_dir.relpath(funcname + ".xo") + + with open(tmp_cpp, "wb") as out_file: + out_file.write(bytes(code)) + + # build xo + args = ( + [xocc, "-c", "-t", target, "--platform", platform, "-o", tmp_xo, "-k", funcname] + + advanced_params + + [tmp_cpp] + ) + returncode = subprocess.call(args) + if returncode != 0: + raise RuntimeError("Compile error") + + tmp_xo_files.append(tmp_xo) + + # build xclbin + tmp_xclbin = tmp_dir.relpath("output.xclbin") + args = ( + [xocc, "-l", "-t", target, "--platform", platform, "-o", tmp_xclbin] + + tmp_xo_files + + advanced_params + ) + returncode = subprocess.call(args) + if returncode != 0: + raise RuntimeError("Link error") + + return bytearray(open(tmp_xclbin, "rb").read()) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/sparse.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..d561c5cbb1c7a5ca8b89ea21a2ba8954fb0fb4ef --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/sparse.py @@ -0,0 +1,204 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Tensor and Operation class for computation declaration.""" +# pylint: disable=invalid-name +import warnings +import numpy as _np +from tvm.runtime import ndarray as _nd +from tvm import te +from tvm.tir import expr as _expr +from tvm.te import tensor as _tensor + + +float32 = "float32" +itype = "int32" + + +class CSRNDArray(object): + """Sparse tensor object in CSR format.""" + + def __init__(self, arg1, device=None, shape=None): + """Construct a sparse matrix in CSR format. + + Parameters + ---------- + arg1 : numpy.ndarray or a tuple with (data, indices, indptr) + The corresponding a dense numpy array, + or a tuple for constructing a sparse matrix directly. + + device: Device + The corresponding device. 
+ + shape : tuple of int + The shape of the array + """ + if isinstance(arg1, tuple): + assert len(arg1) == 3 + self.data, self.indices, self.indptr = arg1 + self.shape = shape + elif isinstance(arg1, _np.ndarray): + source_array = arg1 + ridx, cidx = _np.nonzero(source_array) + data = source_array[ridx, cidx] + self.data = _nd.array(data, device) + indices = _np.nonzero(source_array)[1].astype(itype) + self.indices = _nd.array(indices, device) + indptr = [0] + _np.apply_along_axis( + _np.count_nonzero, axis=1, arr=source_array + ).tolist() + indptr = _np.cumsum(_np.array(indptr, itype)).astype(itype) + self.indptr = _nd.array(indptr, device) + self.shape = source_array.shape + else: + raise RuntimeError( + f"Construct CSRNDArray with either a tuple (data, indices, indptr) " + f"or a numpy.array, can't handle type {type(arg1)}." + ) + self.stype = "csr" + self.dtype = self.data.dtype + assert self.shape is not None + assert isinstance(self.data, _nd.NDArray) + assert isinstance(self.indices, _nd.NDArray) + assert str(self.indices.dtype) == "int32" or str(self.indices.dtype) == "int64", str( + self.indices.dtype + ) + assert isinstance(self.indptr, _nd.NDArray) + assert str(self.indptr.dtype) == "int32" or str(self.indptr.dtype) == "int64", str( + self.indptr.dtype + ) + + def asnumpy(self): + """Construct a full matrix and convert it to numpy array. This API will be deprecated + in TVM v0.8 release. Please use `numpy` instead.""" + warnings.warn( + "CSRNDArray.asnumpy() will be deprecated in TVM v0.8 release. 
" + "Please use CSRNDArray.numpy() instead.", + DeprecationWarning, + ) + return self.numpy() + + def numpy(self): + """Construct a full matrix and convert it to numpy array.""" + full = _np.zeros(self.shape, self.dtype) + ridx = _np.diff(self.indptr.numpy()) + ridx = _np.hstack((_np.ones((v,), itype) * i for i, v in enumerate(ridx))) + full[ridx, self.indices.numpy().astype(itype)] = self.data.numpy() + return full + + +def array(source_array, device=None, shape=None, stype="csr"): + """Construct a sparse NDArray from numpy.ndarray""" + ret = None + if stype == "csr": + ret = CSRNDArray(source_array, shape=shape, device=device) + else: + raise NotImplementedError(f"stype={stype} is not supported yet.") + return ret + + +class SparsePlaceholderOp(object): + """Placeholder class for sparse tensor representations.""" + + def __init__(self, shape, nonzeros, dtype, name): + # pylint: disable=unused-argument + """Contructing a bare bone structure for a sparse matrix + + Parameters + ---------- + shape: Tuple of Expr + The shape of the tensor + + nonzeros: int + The number of non-zero values + + dtype: str, optional + The data type of the tensor + + name: str, optional + The name hint of the tensor + """ + self.shape = shape + self.dtype = dtype + self.name = name + self.stype = "unknown" + + +class CSRPlaceholderOp(SparsePlaceholderOp): + """Placeholder class for CSR based sparse tensor representation.""" + + def __init__(self, shape, nonzeros, dtype, name): + """Contructing a bare bone structure for a csr_matrix + + Parameters + ---------- + shape: Tuple of Expr + The shape of the tensor + + nonzeros: int + The number of non-zero values + + dtype: str, optional + The data type of the tensor + + name: str, optional + The name hint of the tensor + """ + SparsePlaceholderOp.__init__(self, shape, nonzeros, dtype, name) + self.stype = "csr" + self.data = te.placeholder((nonzeros,), dtype=dtype, name=self.name + "_data") + self.indices = te.placeholder((nonzeros,), 
dtype=itype, name=self.name + "_indices") + self.indptr = te.placeholder((self.shape[0] + 1,), dtype=itype, name=self.name + "_indptr") + assert isinstance(self.data, _tensor.Tensor) + assert isinstance(self.indices, _tensor.Tensor) + assert isinstance(self.indptr, _tensor.Tensor) + + +def placeholder(shape, nonzeros=None, dtype=None, name="placeholder", stype=None): + """Construct an empty sparse tensor object. + + Parameters + ---------- + shape: Tuple of Expr + The shape of the tensor + + nonzeros: int + The number of non-zero values + + dtype: str, optional + The data type of the tensor + + name: str, optional + The name hint of the tensor + + stype: str, optional + The name storage type of the sparse tensor (e.g. csr, coo, ell) + + Returns + ------- + tensor: SparsePlaceholderOp + The created sparse tensor placeholder + """ + shape = (shape,) if isinstance(shape, _expr.PrimExpr) else shape + nonzeros = 0 if nonzeros is None else nonzeros + dtype = float32 if dtype is None else dtype + stype = "csr" if stype is None else stype + ret = None + if stype == "csr": + ret = CSRPlaceholderOp(shape=shape, nonzeros=nonzeros, dtype=dtype, name=name) + else: + raise NotImplementedError(f"stype={stype} is not supported yet.") + return ret diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/spirv.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/spirv.py new file mode 100644 index 0000000000000000000000000000000000000000..94b24d0c7b09ff4a8b6cf17450722da00889fa29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/spirv.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Utility for Interacting with SPIRV Tools""" +import subprocess +import os +from . import utils +from .._ffi.base import py_str + + +def optimize(spv_bin): + """Optimize SPIRV using spirv-opt via CLI + + Note that the spirv-opt is still experimental. + + Parameters + ---------- + spv_bin : bytearray + The spirv file + + Return + ------ + cobj_bin : bytearray + The HSA Code Object + """ + + tmp_dir = utils.tempdir() + tmp_in = tmp_dir.relpath("input.spv") + tmp_out = tmp_dir.relpath("output.spv") + with open(tmp_in, "wb") as out_file: + out_file.write(bytes(spv_bin)) + + sdk = os.environ.get("VULKAN_SDK", None) + cmd = os.path.join(sdk, "bin/spirv-opt") if sdk else "spirv-opt" + args = [cmd, "-O", tmp_in, "-o", tmp_out] + proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Opitmizationerror using spirv-opt:\n" + msg += py_str(out) + raise RuntimeError(msg) + + return bytearray(open(tmp_out, "rb").read()) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/stackvm.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/stackvm.py new file mode 100644 index 0000000000000000000000000000000000000000..458d69235db58e5729906a732143edce5fd4a641 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/stackvm.py @@ 
-0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Dummy StackVM build function.""" +# pylint: disable=invalid-name +from __future__ import absolute_import as _abs +import shutil + + +def build(output, files): + """Simply copy StackVM output to the destination. + + Parameters + ---------- + output : str + The target StackVM file. + + files : list + A single self-contained StackVM module file. + """ + + if len(files) == 0: + raise RuntimeError("StackVM artifact must be provided") + if len(files) > 1: + raise RuntimeError("Unexpected multiple StackVM artifacts") + + shutil.copy(files[0], output) + + +# assign output format +build.output_format = "stackvm" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tar.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tar.py new file mode 100644 index 0000000000000000000000000000000000000000..67175b8b278c3b007b5e0b956ed51c0904207031 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tar.py @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Util to invoke tarball in the system.""" +# pylint: disable=invalid-name +from __future__ import absolute_import as _abs +import os +import shutil +import subprocess +from . import utils +from .._ffi.base import py_str + + +def tar(output, files): + """Create tarball containing all files in root. + + Parameters + ---------- + output : str + The target shared library. + + files : list + List of files to be bundled. + """ + cmd = ["tar"] + cmd += ["-czf"] + temp = utils.tempdir() + fset = set() + for fname in files: + base = os.path.basename(fname) + if base in fset: + raise ValueError(f"duplicate file name {base}") + fset.add(base) + shutil.copy(fname, temp.relpath(base)) + cmd += [output] + cmd += ["-C", temp.temp_dir] + cmd += temp.listdir() + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Tar error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + +# assign output format +tar.output_format = "tar" + + +def untar(tar_file, directory): + """Unpack all tar files into the directory + + Parameters + ---------- + tar_file : str + The source tar file. 
+ + directory : str + The target directory + """ + cmd = ["tar"] + cmd += ["-xf"] + cmd += [tar_file] + cmd += ["-C", directory] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Tar error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + +def normalize_file_list_by_unpacking_tars(temp, file_list): + """Normalize the file list by unpacking tars in list. + + When a filename is a tar, it will untar it into an unique dir + in temp and return the list of files in the tar. + When a filename is a normal file, it will be simply added to the list. + + This is useful to untar objects in tar and then turn + them into a library. + + Parameters + ---------- + temp: tvm.contrib.utils.TempDirectory + A temp dir to hold the untared files. + + file_list: List[str] + List of path + + Returns + ------- + ret_list: List[str] + An updated list of files + """ + temp_count = 0 + ret_list = [] + for file_path in file_list: + # enable extracting a tarball + if file_path.endswith(".tar"): + temp_dir = temp.relpath(f"temp{temp_count}") + temp_count += 1 + os.mkdir(temp_dir) + untar(file_path, temp_dir) + # append all files inside + for root, _, files in os.walk(temp_dir): + for file in files: + ret_list.append(os.path.join(root, file)) + else: + ret_list.append(file_path) + return ret_list diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tedd.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tedd.py new file mode 100644 index 0000000000000000000000000000000000000000..680297729789cb090d73e177f8cf9d0bff6100bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tedd.py @@ -0,0 +1,798 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=import-outside-toplevel, nested-min-max +"""Tensor Expression Debug Display (TEDD), visualizing Tensor Expression""" +import html +import json +import warnings +from graphviz import Digraph +from graphviz import Source +import tvm + +TVMDD_TABLE_BODY_WIDTH = 30 +# Must match enum IterVarType defined in include/tvm/expr.h +ITERVAR_TYPE_STRING_MAP = { + 0: ("kDataPar", "#FFFFFF"), + 1: ("kThreadIndex", "#2980B9"), + 2: ("kCommReduce", "#FAD7A0"), + 3: ("kOrdered", "#D35400"), + 4: ("kOpaque", "#ABB2B9"), + 5: ("kUnrolled", "#D2B4DE"), + 6: ("kVectorized", "#AED6F1"), + 7: ("kParallelized", "#F5B7B1"), + 8: ("kTensorized", "#A9DFBF"), +} + +PALETTE = { + 0: "#000000", + 1: "#922B21", + 2: "#76448A", + 3: "#1F618D", + 4: "#148F77", + 5: "#B7950B", + 6: "#AF601A", + 7: "#F5B7B1", + 8: "#A9DFBF", +} + +PALETTE_SIZE = 9 + + +def dom_path_to_string(dom_path, prefix=""): + path_string = prefix + for index in dom_path: + path_string = path_string + "_" + str(index) + return path_string + + +def insert_dot_id(sch): + """Insert unique ID for each node in the DOM tree. + They are used as Dot node ID. 
+ """ + for stage_idx, stage in enumerate(sch["stages"]): + dom_path = [stage_idx] + stage["id"] = dom_path_to_string(dom_path, stage["type"]) + for itervar_idx, itervar in enumerate(stage["all_itervars"]): + dom_path = [stage_idx, itervar_idx] + itervar["id"] = dom_path_to_string(dom_path, itervar["type"]) + for rel_idx, rel in enumerate(stage["relations"]): + dom_path = [stage_idx, rel_idx] + rel["id"] = dom_path_to_string(dom_path, rel["type"]) + for tensor_idx, tensor in enumerate(stage["output_tensors"]): + dom_path = [stage_idx, tensor_idx] + tensor["id"] = dom_path_to_string(dom_path, tensor["type"]) + return sch + + +def itervar_equal(iv_a, iv_b): + """A helper method that compares the equality of two iterative variables""" + # Adopt the following method to assure the equality between two itervars. + # The plain comparison might fail (i.e. iv_a == iv_b) after the change of + # domain bounds from InferBound. + def _var_equal(v_a, v_b): + condtions = [ + v_a.name == v_b.name, + v_a.dtype == v_b.dtype, + v_a.type_annotation == v_b.type_annotation, + ] + return all(c for c in condtions) + + condtions = [ + _var_equal(iv_a.var, iv_b.var), + iv_a.iter_type == iv_b.iter_type, + iv_a.thread_tag == iv_b.thread_tag, + ] + return all(c for c in condtions) + + +class ObjectManager: + """A helper class tracking schedule objects, e.g. 
stage, IterVar, + relationship, and tensor, to their DOM path.""" + + def __init__(self, sch): + self.dict = {} + for stage_idx, stage in enumerate(sch.stages): + self.dict[stage] = [stage_idx] + for itervar_idx, itervar in enumerate(stage.all_iter_vars): + self.dict[itervar] = [stage_idx, itervar_idx] + # the itervars of leaf should also be mapped to the original one + for leaf_iv in stage.leaf_iter_vars: + if itervar_equal(leaf_iv, itervar): + self.dict[leaf_iv] = [stage_idx, itervar_idx] + for rel_idx, rel in enumerate(stage.relations): + self.dict[rel] = [stage_idx, rel_idx] + for tensor_idx in range(stage.op.num_outputs): + self.dict[frozenset({stage.op.name, tensor_idx})] = [stage_idx, tensor_idx] + + def get_dom_path(self, obj): + if obj is None: + return None + assert obj in self.dict, "Node is no found." + return self.dict[obj] + + +def get_or_create_dot_id(obj, prefix="", assert_on_missing=False): + """If obj's ID has been registered, return it. + If not, either assert or create a unique and legal ID, register and + return it, according to assert_on_missing. + ID must be a unique and legal Dotty ID. + + Parameters + ---------- + obj : objet + Serve as the key to the ID. + + prefix : string + Prefix to attach to the ID. Usually use obj's non-unique + name as prefix. + + assert_on_missing : bool + Assert or not if object doesn't have a registered ID. + """ + prefix = prefix.replace(".", "_") + if not hasattr(get_or_create_dot_id, "obj_id_dict"): + get_or_create_dot_id.obj_id_dict = {} + if obj not in get_or_create_dot_id.obj_id_dict: + if assert_on_missing: + assert False, "dot_id " + str(obj) + " has not been registered." 
+ else: + get_or_create_dot_id.obj_id_dict[obj] = prefix + hex(id(obj)) + return get_or_create_dot_id.obj_id_dict[obj] + + +def get_port_id(is_input, index): + return "I_" + str(index) if is_input else "O_" + str(index) + + +def get_itervar_type_info(iter_type): + assert iter_type < len(ITERVAR_TYPE_STRING_MAP), "Unknown IterVar type: " + str(iter_type) + return ITERVAR_TYPE_STRING_MAP[iter_type] + + +def get_itervar_label_color(itervar, iv_type): + type_info = get_itervar_type_info(iv_type) + return ( + linebrk(str(itervar["name"]) + "(" + type_info[0] + ")", TVMDD_TABLE_BODY_WIDTH), + type_info[1], + ) + + +def linebrk(s, n): + """Break input string s with
for every n charactors.""" + result = "" + j = 0 + for i, c in enumerate(s): + if j == n and i != len(s) - 1: + result = result + "\n" + j = 0 + j = j + 1 + result = result + c + result = html.escape(str(result), quote=True) + result = result.replace("\n", "
") + return result + + +def create_graph(name="", rankdir="BT"): + graph = Digraph(name=name) + graph.graph_attr["rankdir"] = rankdir + return graph + + +def itervar_label(itervar, index, index_color, label): + return ( + '' + + str(index) + + '' + + label + + "
" + + str(itervar["properties"]["range"]) + + "" + ) + + +def stage_label(stage): + return stage["name"] + "
Scope: " + stage["properties"]["scope"] + + +def legend_label(): + """Generate legend labels.""" + label = '<' + for iter_type in ITERVAR_TYPE_STRING_MAP: + name, color = ITERVAR_TYPE_STRING_MAP[iter_type] + label += ( + '' + '" + ) + label += "
' + name + "
>" + return label + + +def leaf_itervars(stage): + filtered = filter(lambda x: (x["index"] >= 0), stage["all_itervars"]) + return sorted(filtered, key=lambda x: x["index"]) + + +def legend_dot(g): + with g.subgraph(name="cluster_legend") as subgraph: + subgraph.attr(label="Legend") + label = legend_label() + subgraph.node("legend", label, shape="none", margin="0") + + +def extract_dom_for_viz(sch, need_range=True): + json_str = dump_json(sch, need_range) + s = json.loads(json_str) + s = insert_dot_id(s) + return s + + +def dump_graph(dot_string, show_svg=True, dot_file_path="", output_dot_string=False): + """Output dot_string in various formats.""" + if dot_file_path: + try: + dot_file = open(dot_file_path, "w+") + dot_file.write(dot_string) + dot_file.close() + except IOError: + print("Cannot open file: " + dot_file_path) + if show_svg: + from IPython.display import display + from IPython.display import SVG + + src = Source(dot_string) + display(SVG(src.pipe(format="svg"))) + if output_dot_string: + return dot_string + return None + + +def dump_json(sch, need_range): + """Serialize data for visualization from a schedule in JSON format. 
+ + Parameters + ---------- + sch : schedule + The schedule object to serialize + + Returns + ------- + json : string + Serialized JSON string + """ + + def encode_itervar(itervar, stage, index, range_map): + """Extract and encode IterVar visualization data to a dictionary""" + ivrange = range_map[itervar] if range_map is not None and itervar in range_map else None + bind_thread = None + tensor_intrin = None + if itervar in stage.iter_var_attrs: + attr = stage.iter_var_attrs[itervar] + iv_type = attr.iter_type + # binding + bind_thread = str(attr.bind_thread.var) if attr.bind_thread is not None else None + # tensorization + if attr.tensor_intrin is not None: + tensor_intrin = str(attr.tensor_intrin.body) + # remove the final \n + tensor_intrin = tensor_intrin[0:-1] if tensor_intrin[-1] == "\n" else tensor_intrin + else: + tensor_intrin = None + else: + iv_type = itervar.iter_type + itervar_dict = { + "type": "IterVar", + "index": index, + "name": str(itervar.var), + "itervar_type": iv_type, + "properties": { + "thread": bind_thread, + "intrin": tensor_intrin, + "range": str(ivrange) if ivrange is not None else "range(N/A)", + }, + } + return itervar_dict + + def encode_itervars(stage, range_map): + """Extract and encode IterVars visualization data from a stage to a dictionary""" + + def get_leaf_itervar_index(itervar, leaf_iv): + for leaf_index, ivar in enumerate(leaf_iv): + if itervar_equal(ivar, itervar): + return leaf_index + return -1 + + itervars = [] + for itervar in stage.all_iter_vars: + leaf_index = get_leaf_itervar_index(itervar, stage.leaf_iter_vars) + itervars.append(encode_itervar(itervar, stage, leaf_index, range_map)) + return itervars + + def encode_itervar_relation(obj_manager, rel): + """Extract and encode IterVar Relationship visualization data to a dictionary""" + rel_type = type(rel) + if rel_type is tvm.te.schedule.Split: + node_type = "Split_Relation" + rel_dict = { + "type": node_type, + "parent": obj_manager.get_dom_path(rel.parent), + 
"outer": obj_manager.get_dom_path(rel.outer), + "inner": obj_manager.get_dom_path(rel.inner), + } + elif rel_type is tvm.te.schedule.Fuse: + node_type = "Fuse_Relation" + rel_dict = { + "type": node_type, + "fused": obj_manager.get_dom_path(rel.fused), + "outer": obj_manager.get_dom_path(rel.outer), + "inner": obj_manager.get_dom_path(rel.inner), + } + elif rel_type is tvm.te.schedule.Singleton: + node_type = "Singleton_Relation" + rel_dict = { + "type": node_type, + "iter": obj_manager.get_dom_path(rel.iter), + } + else: + return None + return rel_dict + + def encode_itervar_relations(obj_manager, stage): + relations = [] + for i in range(len(stage.relations)): + rel = encode_itervar_relation(obj_manager, stage.relations[i]) + if rel is not None: + relations.append(rel) + return relations + + def encode_tensor(obj_manager, tensor, stage): + """Extract and encode tensor visualization data to a dictionary""" + tensor_dict = { + "type": "Tensor", + "source": obj_manager.get_dom_path(stage), + "value_index": tensor.value_index, + "shape": str(tensor.op.output(tensor.value_index).shape), + "data_type": tensor.op.output(tensor.value_index).dtype, + } + return tensor_dict + + def encode_tensors(obj_manager, stage): + tensors = [] + for i in range(stage.op.num_outputs): + tensor = stage.op.output(i) + tensors.append(encode_tensor(obj_manager, tensor, stage)) + tensors.sort(key=lambda tensor: tensor["value_index"]) + return tensors + + def encode_stage(obj_manager, stage, range_map): + """Extract and encode stage visualization data to a dictionary""" + stage_dict = { + "type": "Stage", + "name": stage.op.name, + "attaching_to": obj_manager.get_dom_path(stage.attach_ivar), + "compute": str(stage.op.body) if hasattr(stage.op, "body") else None, + "properties": { + "scope": stage.scope, + }, + "all_itervars": encode_itervars(stage, range_map), + "relations": encode_itervar_relations(obj_manager, stage), + "input_tensors": [ + 
obj_manager.get_dom_path(frozenset({tensor.op.name, tensor.value_index})) + for tensor in stage.op.input_tensors + ], + "output_tensors": encode_tensors(obj_manager, stage), + } + return stage_dict + + def encode_schedule(sch, need_range): + """Extract and encode data from a schedule for visualization to a nested dictionary. + It is useful for JSON to serialize schedule. + + Parameters + ---------- + sch : schedule + The schedule object to extract + + Returns + ------- + dict : dictionary + A nested dictionary + """ + assert isinstance( + sch, tvm.te.schedule.Schedule + ), "Input is not a tvm.te.schedule.Schedule object." + range_map = None + if need_range: + try: + range_map = tvm.te.schedule.InferBound(sch) + except tvm._ffi.base.TVMError as expt: + warnings.warn( + "Ranges are not available, because InferBound fails with the following error:\n" + + str(expt) + ) + + obj_manager = ObjectManager(sch) + stages = [] + for stage in sch.stages: + stages.append(encode_stage(obj_manager, stage, range_map)) + return { + "type": "Schedule", + "stages": stages, + } + + return json.dumps(sch, default=lambda s: encode_schedule(s, need_range)) + + +def viz_schedule_tree(sch, show_svg=False, dot_file_path="", output_dot_string=False): + """Top level API to render schedule tree + + Parameters + ---------- + sch : schedule + The schedule object to visualize + + show_svg : bool + Display graph as SVG, useful for Jupyter notebooks. + + dot_file_path : string + Dot file to save the graph. + + output_dot_string : bool + Return dot file content or an empty string. + + Returns + ------- + dot_string : string + Dot file content or an empty string according to output_dot_string + + Examples + -------- + The following code writes a schedule tree to a dot file. + + .. code-block:: python + tedd.viz_schedule_tree(s, dot_file_path = '/tmp/example.dot') + + Use the following code to render a SVG graph in a Jupyter notebook. + + .. 
code-block:: python + tedd.viz_schedule_tree(s, show_svg = True) + """ + + def create_schedule_tree_graph(name=""): + return create_graph(name=name, rankdir="BT") + + def root_dot(g): + g.node("ROOT", "ROOT", shape="oval", margin="0") + + def stage_node_dot(g, stage): + node_label = stage_node_label(stage) + g.node(stage["id"], node_label, shape="none", margin="0") + + def stage_node_label(stage): + """Return a html format label for the given stage.""" + label = ( + '<" + ) + + for leafiv in leaf_itervars(stage): + iv_type = leafiv["itervar_type"] + var_attr_label = "" + if "thread" in leafiv["properties"] and leafiv["properties"]["thread"] is not None: + var_attr_label = ( + var_attr_label + + '
(' + + str(leafiv["properties"]["thread"]) + + ")" + ) + if "intrin" in leafiv["properties"] and leafiv["properties"]["intrin"] is not None: + var_attr_label = ( + var_attr_label + + "
" + + linebrk( + "(tensor_intrin:" + str(leafiv["properties"]["intrin"]) + ")", + TVMDD_TABLE_BODY_WIDTH, + ) + ) + var_label, color = get_itervar_label_color(leafiv, iv_type) + label += itervar_label(leafiv, leafiv["index"], color, var_label + var_attr_label) + if stage["compute"] is not None: + label += ( + '" + ) + label += "
' + stage_label(stage) + "
' + + linebrk(str(stage["compute"]), TVMDD_TABLE_BODY_WIDTH) + + "
>" + return label + + def compute_at_dot(g, stage): + """If the given stage attaches to another stage, create an edge from it + stage to its attach point; otherwise, create an edge to the ROOT. + """ + src = stage["id"] + dst = ( + dom_path_to_string([stage["attaching_to"][0]], "Stage") + + ":" + + dom_path_to_string(stage["attaching_to"], "IterVar") + if stage["attaching_to"] is not None + else "ROOT" + ) + color = ( + PALETTE[stage["attaching_to"][1] + 1] + if stage["attaching_to"] is not None and stage["attaching_to"][1] < PALETTE_SIZE - 1 + else PALETTE[0] + ) + g.edge(src, dst, color=color) + + graph = create_schedule_tree_graph("Schedule Tree") + s = extract_dom_for_viz(sch) + legend_dot(graph) + for stage in s["stages"]: + stage_node_dot(graph, stage) + for stage in s["stages"]: + compute_at_dot(graph, stage) + root_dot(graph) + return dump_graph(graph.source, show_svg, dot_file_path, output_dot_string) + + +def viz_itervar_relationship_graph(sch, show_svg=False, dot_file_path="", output_dot_string=False): + """Top level API to render IterVar relationship graph + + Parameters + ---------- + sch : schedule + The schedule object to visualize + + show_svg : bool + Display graph as SVG, useful for Jupyter notebooks. + + dot_file_path : string + Dot file to save the graph. + + output_dot_string : bool + Return dot file content or an empty string. + + Examples + -------- + The following code writes Ian tervar relationship graph to a dot file. + + .. code-block:: python + tedd.viz_def viz_itervar_relationship_graph(sch, + (s, dot_file_path = '/tmp/example.dot') + + Use the following code to render a SVG graph in a Jupyter notebook. + + .. 
code-block:: python + tedd.viz_def viz_itervar_relationship_graph(sch, + (s, show_svg = True) + """ + + def create_itervar_relation_graph(name=""): + return create_graph(name=name, rankdir="TB") + + def itervar_node_dot(g, itervar, iv_type, index): + label = itervar_node_label(itervar, iv_type, index) + g.node(itervar["id"], label, shape="none", margin="0") + + def itervar_node_label(itervar, iv_type, index): + label = ( + '<' + + itervar_label( + itervar, + index, + get_itervar_label_color(itervar, iv_type)[1], + get_itervar_label_color(itervar, iv_type)[0], + ) + + "
>" + ) + return label + + def itervar_relation_node_dot(g, node_id, node_label, input_ports, output_ports): + label = itervar_relation_node_label(node_label, input_ports, output_ports) + g.node(node_id, label, shape="none", margin="0") + + def itervar_relation_node_label(node_label, input_ports, output_ports): + """Return a html format label for an itervar relationship node + including node_label and input/output ports. + """ + label = '<' + "" + max_port_num = max(len(input_ports), len(output_ports)) + for i in range(max_port_num): + if i < len(input_ports): + input_port = input_ports[i] + label += '" + else: + label += '' + label += "" + label += ( + '" + ) + label += "" + for i in range(max_port_num): + if i < len(output_ports): + output_port = output_ports[i] + label += ( + '" + ) + else: + label += '' + label += "" + label += "
' + input_port + "
' + + node_label + + "
' + output_port + "
>" + return label + + def itervar_relation_dot(g, node, node_id): + """Create an itervar relationship node.""" + node_type = node["type"] + if node_type == "Split_Relation": + node_type = "Split" + itervar_relation_node_dot(g, node_id, node_type, ["Input"], ["Outer", "Inner"]) + parent = dom_path_to_string(node["parent"], "IterVar") + outer = dom_path_to_string(node["outer"], "IterVar") + inner = dom_path_to_string(node["inner"], "IterVar") + g.edge(parent + ":itervar", node_id + ":Input") + g.edge(node_id + ":Outer", outer + ":itervar") + g.edge(node_id + ":Inner", inner + ":itervar") + elif node_type == "Fuse_Relation": + node_type = "Fuse" + itervar_relation_node_dot(g, node_id, node_type, ["Outer", "Inner"], ["Fused"]) + fused = dom_path_to_string(node["fused"], "IterVar") + outer = dom_path_to_string(node["outer"], "IterVar") + inner = dom_path_to_string(node["inner"], "IterVar") + g.edge(outer + ":itervar", node_id + ":Outer") + g.edge(inner + ":itervar", node_id + ":Inner") + g.edge(node_id + ":Fused", fused + ":itervar") + elif node_type == "Singleton_Relation": + node_type = "Singleton" + itervar_relation_node_dot(g, node_id, node_type, [], ["Iter"]) + itervar = dom_path_to_string(node["inner"], "IterVar") + g.edge(node_id + ":Iter", itervar + ":itervar") + else: + assert False, "Unknown IterVarRelationNode: " + node_type + + def stage_node_dot(g, stage): + """Create a stage node.""" + with g.subgraph(name="cluster_" + stage["id"]) as subgraph: + subgraph.attr(label=stage["name"]) + if stage["all_itervars"]: + for itervar in stage["all_itervars"]: + iv_type = itervar["itervar_type"] + itervar_node_dot(subgraph, itervar, iv_type, itervar["index"]) + for rel in stage["relations"]: + node_id = rel["id"] + itervar_relation_dot(subgraph, rel, node_id) + else: + subgraph.node(stage["name"] + "_placeholder", style="invis") + + graph = create_itervar_relation_graph("IterVar Relationship Graph") + s = extract_dom_for_viz(sch) + legend_dot(graph) + for stage in 
s["stages"]: + stage_node_dot(graph, stage) + + return dump_graph(graph.source, show_svg, dot_file_path, output_dot_string) + + +def viz_dataflow_graph(sch, show_svg=False, dot_file_path="", output_dot_string=False): + """Top level API to render dataflow graph + + Parameters + ---------- + sch : schedule + The schedule object to visualize + + show_svg : bool + Display graph as SVG, useful for Jupyter notebooks. + + dot_file_path : string + Dot file to save the graph. + + output_dot_string : bool + Return dot file content or an empty string. + + Examples + -------- + The following code writes a dataflow graph to a dot file. + + .. code-block:: python + tedd.viz_dataflow_graph(s, dot_file_path = '/tmp/example.dot') + + Use the following code to render a SVG graph in a Jupyter notebook. + + .. code-block:: python + tedd.viz_dataflow_graph(s, show_svg = True)""" + + def create_dataflow_graph(name=""): + return create_graph(name=name, rankdir="LR") + + def tensor_node_dot(g, tensor): + """Create a tensor node.""" + label = tensor_node_label(tensor) + g.node(tensor["id"], label, shape="oval", margin="0") + + def tensor_node_label(tensor): + """Return a html format label for the given tensor.""" + label = str(tensor["shape"]) + "\n" + str(tensor["data_type"]) + return label + + def stage_node_dot(g, stage): + """Create a stage node.""" + label = stage_node_label(stage) + g.node(stage["id"], label, shape="none", margin="0") + + def stage_node_label(stage): + """Return a html format label for the given stage.""" + rows = max(1, max(len(stage["output_tensors"]), len(stage["input_tensors"]))) + label = '<' + for i in range(rows): + label += "" + if i < len(stage["input_tensors"]): + port_id = get_port_id(True, i) + label += ( + '" + ) + else: + label += '' + if i == 0: + label += ( + '" + ) + if i < len(stage["output_tensors"]): + port_id = get_port_id(False, i) + label += ( + '" + ) + else: + label += '' + label += "" + label += "
' + str(i) + "' + + stage_label(stage) + + "' + str(i) + "
>" + return label + + def dfg_dot(g, sch): + """Create edges among stages.""" + stages = sch["stages"] + for stage in stages: + for i in range(len(stage["input_tensors"])): + src = dom_path_to_string(stage["input_tensors"][i], "Tensor") + dst = stage["id"] + ":" + get_port_id(True, i) + g.edge(src, dst) + for i in range(len(stage["output_tensors"])): + src = stage["id"] + ":" + get_port_id(False, i) + dst = stage["output_tensors"][i]["id"] + g.edge(src, dst) + + graph = create_dataflow_graph("Dataflow Graph") + s = extract_dom_for_viz(sch, need_range=False) + for stage in s["stages"]: + stage_node_dot(graph, stage) + for tensor in stage["output_tensors"]: + tensor_node_dot(graph, tensor) + + dfg_dot(graph, s) + + return dump_graph(graph.source, show_svg, dot_file_path, output_dot_string) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tflite_runtime.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tflite_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..1558e36d51af4ef05c1e7e2ab0eb187d6930bcc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tflite_runtime.py @@ -0,0 +1,121 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +"""TFLite runtime that load and run tflite models.""" +import tvm._ffi +from ..rpc import base as rpc_base + + +def create(tflite_model_bytes, device, runtime_target="cpu"): + """Create a runtime executor module given a tflite model and device. + Parameters + ---------- + tflite_model_byte : bytes + The tflite model to be deployed in bytes string format. + device : Device + The device to deploy the module. It can be local or remote when there + is only one Device. + runtime_target: str + Execution target of TFLite runtime: either `cpu` or `edge_tpu`. + Returns + ------- + tflite_runtime : TFLiteModule + Runtime tflite module that can be used to execute the tflite model. + """ + device_type = device.device_type + + if runtime_target == "edge_tpu": + runtime_func = "tvm.edgetpu_runtime.create" + else: + runtime_func = "tvm.tflite_runtime.create" + + if device_type >= rpc_base.RPC_SESS_MASK: + fcreate = device._rpc_sess.get_function(runtime_func) + else: + fcreate = tvm._ffi.get_global_func(runtime_func) + + return TFLiteModule(fcreate(bytearray(tflite_model_bytes), device)) + + +class TFLiteModule(object): + """Wrapper runtime module. + + This is a thin wrapper of the underlying TVM module. + you can also directly call set_input, run, and get_output + of underlying module functions + + Parameters + ---------- + module : Module + The internal tvm module that holds the actual tflite functions. + + Attributes + ---------- + module : Module + The internal tvm module that holds the actual tflite functions. 
+ """ + + def __init__(self, module): + self.module = module + self._set_input = module["set_input"] + self._invoke = module["invoke"] + self._get_output = module["get_output"] + self._set_num_threads = module["set_num_threads"] + + def set_input(self, index, value): + """Set inputs to the module via kwargs + + Parameters + ---------- + key : int or str + The input key + + value : the input value. + The input key + + params : dict of str to NDArray + Additonal arguments + """ + self._set_input(index, value) + + def invoke(self): + """Invoke forward execution of the model + + Parameters + ---------- + input_dict: dict of str to NDArray + List of input values to be feed to + """ + self._invoke() + + def get_output(self, index): + """Get index-th output to out + + Parameters + ---------- + index : int + The output index + """ + return self._get_output(index) + + def set_num_threads(self, num_threads): + """Set the number of threads via kwargs + Parameters + ---------- + num_threads : int + The number of threads + """ + self._set_num_threads(num_threads) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/thrust.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/thrust.py new file mode 100644 index 0000000000000000000000000000000000000000..8f3178429589db80fea49aa65c0f243965178789 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/thrust.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Utilities for thrust""" +import logging + +from tvm._ffi import get_global_func + + +def maybe_warn(target, func_name): + if "thrust" in target.libs and get_global_func(func_name, allow_missing=True) is None: + logging.warning("thrust is requested but TVM is not built with thrust.") + + +def can_use_thrust(target, func_name): + maybe_warn(target, func_name) + return ( + target.kind.name in ["cuda", "nvptx"] + and "thrust" in target.libs + and get_global_func(func_name, allow_missing=True) + ) + + +def can_use_rocthrust(target, func_name): + maybe_warn(target, func_name) + return ( + target.kind.name == "rocm" + and "thrust" in target.libs + and get_global_func(func_name, allow_missing=True) + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tvmjs.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tvmjs.py new file mode 100644 index 0000000000000000000000000000000000000000..923301a1f509af48cc58999b45072bd3127b7745 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/tvmjs.py @@ -0,0 +1,355 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Namespace to store utilities for building web runtime.""" +import hashlib +import json +import math +import os +import shutil + +# pylint: disable=unused-import +import sys +from types import GeneratorType +from typing import Iterator, Mapping, Tuple, Union + +import numpy as np + +try: + import ml_dtypes +except ImportError: + ml_dtypes = None + +import tvm +from tvm._ffi.libinfo import find_lib_path + +from .emcc import create_tvmjs_wasm + + +def _convert_f32_to_bf16(value): + cap = np.finfo("float32").max + assert -np.finfo("float32").max == np.finfo("float32").min + bf16_limit = ((np.array([cap.view("uint32")]) >> 16) << 16).view("float32")[0] + # When the value is in [-bf16_limit, bf16_limit], round to nearest even. + # We can afford to do it in dumping phase to reduce overall rounding error. 
+ # + # When the value is out of bound(usually mask values in attention), use truncation + # so it is equivalent to clip to the limit values + data = value.view("uint32") + rounding_bias = np.where( + np.logical_and(value < bf16_limit, value > -bf16_limit), + ((data >> 16) & 1) + 0x7FFF, + np.zeros_like(data), + ) + return ((data + rounding_bias) >> 16).astype("uint16") + + +def _convert_bf16_to_f32(value): + data = value.view("uint16") + return (data.astype("uint32") << 16).view("float32") + + +def _calculate_md5(filename): + hash_md5 = hashlib.md5() + with open(filename, "rb") as file: + for chunk in iter(lambda: file.read(8192), b""): + hash_md5.update(chunk) + return hash_md5.hexdigest() + + +class NDArrayCacheShardingManager: + """Internal helper to shard ndarrays.""" + + def __init__(self, cache_dir: str, prefix: str, shard_cap_nbytes: int): + self.cache_dir = cache_dir + self.prefix = prefix + self.curr_records = [] + self.curr_data = bytearray() + self.shard_records = [] + self.shard_cap_nbytes = shard_cap_nbytes + self.counter = 0 + + def append(self, data, name, shape, dtype, encode_format): + """Commit a record to the manager. + + Parameters + ---------- + data: bytes + Raw bytes to be appended. 
+ + name: str + The name of the parameter + + shape: tuple + The shape of the array + + dtype: str + The dtype information + + encode_format: + The encode format of the entry + """ + rec = { + "name": name, + "shape": shape, + "dtype": dtype, + "format": encode_format, + "nbytes": len(data), + } + + if self.pending_nbytes + len(data) >= self.shard_cap_nbytes: + if len(data) * 2 >= self.shard_cap_nbytes: + # out of band data + rec["byteOffset"] = 0 + self._commit_internal(data, [rec]) + return + self.commit() + rec["byteOffset"] = self.pending_nbytes + self.curr_records.append(rec) + self.curr_data += data + + def commit(self): + """Commit a record""" + if self.pending_nbytes != 0: + self._commit_internal(self.curr_data, self.curr_records) + self.curr_data = bytearray() + self.curr_records = [] + + def finish(self): + """Finish building and return shard records.""" + self.commit() + return self.shard_records + + def _commit_internal(self, data, records): + data_path = f"{self.prefix}_{self.counter}.bin" + full_path = os.path.join(self.cache_dir, data_path) + self.counter += 1 + with open(full_path, "wb") as outfile: + outfile.write(data) + + shard_record = { + "dataPath": data_path, + "format": "raw-shard", + "nbytes": len(data), + "records": records, + "md5sum": _calculate_md5(full_path), + } + self.shard_records.append(shard_record) + + @property + def pending_nbytes(self): + """Return total bytes stored so far""" + return len(self.curr_data) + + +def dump_ndarray_cache( + params: Union[ + Mapping[str, Union[np.ndarray, tvm.runtime.NDArray]], + Iterator[Tuple[str, Union[np.ndarray, tvm.runtime.NDArray]]], + ], + cache_dir: str, + encode_format="f32-to-bf16", + meta_data=None, + shard_cap_mb=32, + show_progress: bool = True, +): + """Dump parameters to NDArray cache. 
+ + Parameters + ---------- + params: Union[ + Mapping[str, Union[np.ndarray, tvm.runtime.NDArray]], + Iterator[Tuple[str, Union[np.ndarray, tvm.runtime.NDArray]]], + ] + The parameter dictionary or generator + + cache_dir: str + The path to the cache + + encode_format: {"f32-to-bf16", "raw"} + Encoding format. + + meta_data: json-compatible-struct or Callable[[], Any] + Extra meta_data to be stored in the cache json file, + or a callable that returns the metadata. + + shard_cap_mb: int + Maxinum number of MB to be kept per shard + + show_progress: bool + A boolean indicating if to show the dump progress. + """ + if encode_format not in ("raw", "f32-to-bf16"): + raise ValueError(f"Invalie encode_format {encode_format}") + + records = [] + from_generator = isinstance(params, GeneratorType) + total_bytes = 0 + counter = 0 + max_out_length = 0 + + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + f32_to_bf16_triggered = False + + print("Start storing to cache %s" % cache_dir) + shard_cap_nbytes = shard_cap_mb * (1 << 20) + + shard_manager = NDArrayCacheShardingManager(cache_dir, "params_shard", shard_cap_nbytes) + + param_generator = params.items() if not from_generator else params + for k, origin_v in param_generator: + shape = list(origin_v.shape) + v = origin_v + if not isinstance(v, np.ndarray): + v = v.numpy() + + # prefer to preserve original dtype, especially if the format was bfloat16 + dtype = str(origin_v.dtype) if isinstance(origin_v, tvm.nd.NDArray) else str(v.dtype) + total_bytes += math.prod(v.shape) * np.dtype(v.dtype).itemsize + + # convert fp32 to bf16 + if encode_format == "f32-to-bf16" and dtype == "float32": + data = _convert_f32_to_bf16(v).tobytes() + f32_to_bf16_triggered = True + else: + data = v.tobytes() + + shard_manager.append(data, name=k, shape=shape, dtype=dtype, encode_format=encode_format) + + counter += 1 + if show_progress: + last_cmd = "[%04d] saving %s" % (counter, k) + flush = "\r" + (" " * max_out_length) + "\r" + 
max_out_length = max(len(last_cmd), max_out_length) + sys.stdout.write(flush + last_cmd) + + records = shard_manager.finish() + meta_data = {} if meta_data is None else meta_data if not callable(meta_data) else meta_data() + + nd_cache_json = os.path.join(cache_dir, "ndarray-cache.json") + + with open(nd_cache_json, "w") as outfile: + json.dump({"metadata": meta_data, "records": records}, outfile, indent=4) + print( + "\nAll finished, %d total shards committed, record saved to %s" + % (shard_manager.counter, nd_cache_json) + ) + + if f32_to_bf16_triggered: + for shard in records: + for item in shard["records"]: + if item["dtype"] == "float32": + item["format"] = "raw" + item["dtype"] = "bfloat16" + b16_nd_cache_json = os.path.join(cache_dir, "ndarray-cache-b16.json") + # also dump a file that contains bf16 + with open(b16_nd_cache_json, "w") as outfile: + json.dump({"metadata": meta_data, "records": records}, outfile, indent=4) + print("Also saved a bf16 record to %s" % b16_nd_cache_json) + + +def load_ndarray_cache(cachepath: str, device: tvm.runtime.Device): + """Load the ndarray cache from the directory or json. + + + Parameters + ---------- + cachepath: str + Path to the location or json file. + + device: tvm.runtime.Device + The device we would like to load the data from. 
+ """ + if not cachepath.endswith(".json"): + cachepath = os.path.join(cachepath, "ndarray-cache.json") + + cachedir = os.path.dirname(cachepath) + json_info = json.loads(open(cachepath, "r").read()) + result_dict = {} + + for shard_rec in json_info["records"]: + data_path = shard_rec["dataPath"] + full_data_path = os.path.join(cachedir, data_path) + raw_data = open(full_data_path, "rb").read() + assert shard_rec["format"] == "raw-shard" + assert shard_rec["nbytes"] == len(raw_data) + + for rec in shard_rec["records"]: + name = rec["name"] + shape = rec["shape"] + dtype = rec["dtype"] + encode_format = rec["format"] + offset = rec["byteOffset"] + nbytes = rec["nbytes"] + + arr = tvm.nd.empty(shape, dtype, device=device) + assert offset + nbytes <= len(raw_data) + buffer_source = raw_data[offset : offset + nbytes] + if dtype == "e4m3_float8": + if ml_dtypes is not None: + dtype = ml_dtypes.float8_e4m3fn + else: + raise RuntimeError( + "ml_dtypes is not installed, cannot convert e4m3_float8 array to numpy." + ) + if dtype == "e5m2_float8": + if ml_dtypes is not None: + dtype = ml_dtypes.float8_e5m2 + else: + raise RuntimeError( + "ml_dtypes is not installed, cannot convert e5m2_float8 array to numpy." + ) + if encode_format == "f32-to-bf16" and dtype == "float32": + data = np.frombuffer(buffer_source, dtype="uint16").reshape(shape) + arr.copyfrom(_convert_bf16_to_f32(data)) + elif dtype == "bfloat16": + data = np.frombuffer(buffer_source, dtype="uint16").reshape(shape) + arr.copyfrom(data) + else: + data = np.frombuffer(buffer_source, dtype=dtype).reshape(shape) + arr.copyfrom(data) + result_dict[name] = arr + return result_dict, json_info["metadata"] + + +def export_runtime(runtime_dir): + """Export TVMJS runtime to the runtime_dir + + Parameters + ---------- + runtime_dir: str + The runtime directory + """ + web_hint = ( + "make sure you setup tvm web runtime correctly." 
+ + " obtain a copy of TVM source code, set TVM_HOME env variable:\n" + + " cd /path/to/tvm/web; make; npm run bundle" + ) + + jsbundle = find_lib_path("tvmjs.bundle.js", optional=True) + if not jsbundle: + raise RuntimeError("Cannot find tvmjs.bundle.js, " + web_hint) + + wasi = find_lib_path("tvmjs_runtime.wasi.js", optional=True) + if not wasi: + raise RuntimeError("Cannot find tvmjs_runtime.wasi.js, " + web_hint) + + print(f"Copy {jsbundle[0]} to {runtime_dir}") + shutil.copy(jsbundle[0], runtime_dir) + print(f"Copy {wasi[0]} to {runtime_dir}") + shutil.copy(wasi[0], runtime_dir) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/utils.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4c5cb848febabff40ad074312bb705d51f2d6a0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/utils.py @@ -0,0 +1,271 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""Common system utilities""" +import atexit +import contextlib +import datetime +import os +import pathlib +import tempfile +import threading +import shutil + +try: + import fcntl +except ImportError: + fcntl = None + + +class DirectoryCreatedPastAtExit(Exception): + """Raised when a TempDirectory is created after the atexit hook runs.""" + + +class TempDirectory(object): + """Helper object to manage temp directory during testing. + + Automatically removes the directory when it went out of scope. + """ + + # When True, all TempDirectory are *NOT* deleted and instead live inside a predicable directory + # tree. + _KEEP_FOR_DEBUG = False + + # In debug mode, each tempdir is named after the sequence + _NUM_TEMPDIR_CREATED = 0 + _NUM_TEMPDIR_CREATED_LOCK = threading.Lock() + + @classmethod + def _increment_num_tempdir_created(cls): + with cls._NUM_TEMPDIR_CREATED_LOCK: + to_return = cls._NUM_TEMPDIR_CREATED + cls._NUM_TEMPDIR_CREATED += 1 + + return to_return + + _DEBUG_PARENT_DIR = None + + @classmethod + def _get_debug_parent_dir(cls): + if cls._DEBUG_PARENT_DIR is None: + all_parents = f"{tempfile.gettempdir()}/tvm-debug-mode-tempdirs" + if not os.path.isdir(all_parents): + os.makedirs(all_parents) + cls._DEBUG_PARENT_DIR = tempfile.mkdtemp( + prefix=datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S___"), dir=all_parents + ) + return cls._DEBUG_PARENT_DIR + + TEMPDIRS = set() + + @classmethod + def remove_tempdirs(cls): + temp_dirs = getattr(cls, "TEMPDIRS", None) + if temp_dirs is None: + return + + for path in temp_dirs: + shutil.rmtree(path, ignore_errors=True) + + cls.TEMPDIRS = None + + @classmethod + @contextlib.contextmanager + def set_keep_for_debug(cls, set_to=True): + """Keep temporary directories past program exit for debugging.""" + old_keep_for_debug = cls._KEEP_FOR_DEBUG + try: + cls._KEEP_FOR_DEBUG = set_to + yield + finally: + cls._KEEP_FOR_DEBUG = old_keep_for_debug + + def __init__(self, custom_path=None, keep_for_debug=None): + if 
self.TEMPDIRS is None: + raise DirectoryCreatedPastAtExit() + + if keep_for_debug is not None: + self._created_with_keep_for_debug = keep_for_debug + else: + self._created_with_keep_for_debug = self._KEEP_FOR_DEBUG + + if custom_path: + os.mkdir(custom_path) + self.temp_dir = custom_path + else: + if self._created_with_keep_for_debug: + parent_dir = self._get_debug_parent_dir() + self.temp_dir = f"{parent_dir}/{self._increment_num_tempdir_created():05d}" + os.mkdir(self.temp_dir) + else: + self.temp_dir = tempfile.mkdtemp() + + if not self._created_with_keep_for_debug: + self.TEMPDIRS.add(self.temp_dir) + + def remove(self): + """Remove the tmp dir""" + if self.temp_dir: + if not self._created_with_keep_for_debug: + shutil.rmtree(self.temp_dir, ignore_errors=True) + self.TEMPDIRS.remove(self.temp_dir) + self.temp_dir = None + + @property + def path(self): + return pathlib.Path(self.temp_dir) + + def __truediv__(self, other): + if not isinstance(other, (str, pathlib.Path)): + raise TypeError( + f"TempDirectory / operator: must supply str or pathlib.Path; got {repr(other)}" + ) + + return self.path / other + + def __del__(self): + temp_dirs = getattr(self, "TEMPDIRS", None) + if temp_dirs is None: + # Do nothing if the atexit hook has already run. + return + + self.remove() + + def relpath(self, name): + """Relative path in temp dir + + Parameters + ---------- + name : str + The name of the file. + + Returns + ------- + path : str + The concatenated path. + """ + return os.path.join(self.temp_dir, name) + + def listdir(self): + """List contents in the dir. + + Returns + ------- + names : list + The content of directory + """ + return os.listdir(self.temp_dir) + + +atexit.register(TempDirectory.remove_tempdirs) + + +def tempdir(custom_path=None, keep_for_debug=None): + """Create temp dir which deletes the contents when exit. 
+ + Parameters + ---------- + custom_path : str, optional + Manually specify the exact temp dir path + + keep_for_debug : bool + Keep temp directory for debugging purposes + Returns + ------- + temp : TempDirectory + The temp directory object + """ + return TempDirectory(custom_path=custom_path, keep_for_debug=keep_for_debug) + + +class FileLock(object): + """File lock object + + Parameters + ---------- + path : str + The path to the lock + """ + + def __init__(self, path): + self.lock_file = open(path, "w") + if fcntl: + fcntl.lockf(self.lock_file, fcntl.LOCK_EX) + + def release(self): + """Release the lock""" + if self.lock_file: + if fcntl: + fcntl.lockf(self.lock_file, fcntl.LOCK_UN) + self.lock_file.close() + self.lock_file = None + + +def filelock(path): + """Create a file lock which locks on path + + Parameters + ---------- + path : str + The path to the lock + + Returns + ------- + lock : File lock object + """ + return FileLock(path) + + +def is_source_path(path): + """Check if path is source code path. 
+ + Parameters + ---------- + path : str + A possible path + + Returns + ------- + valid : bool + Whether path is a possible source path + """ + if os.path.exists(path): + return True + if path.find("\n") != -1: + return False + spath = path.rsplit(".", 1) + return len(spath) == 2 and spath[1].strip() == spath[1] + + +def which(exec_name): + """Try to find full path of exec_name + + Parameters + ---------- + exec_name : str + The executable name + + Returns + ------- + path : str + The full path of executable if found, otherwise returns None + """ + base_list = ["", "/bin"] + os.environ.get("PATH", "").split(os.pathsep) + for path in base_list: + full_path = os.path.join(path, exec_name) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + return None diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/xcode.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/xcode.py new file mode 100644 index 0000000000000000000000000000000000000000..2b68600197e48eab12f1c4d6bdf0606528e91d0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/contrib/xcode.py @@ -0,0 +1,184 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name +"""Utility to invoke Xcode compiler toolchain""" +from __future__ import absolute_import as _abs + +import os +import sys +import subprocess +import json +from .._ffi.base import py_str +from . import utils + + +def xcrun(cmd): + """Run xcrun and return the output. + + Parameters + ---------- + cmd : list of str + The command sequence. + + Returns + ------- + out : str + The output string. + """ + cmd = ["xcrun"] + cmd + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + return out.strip() + + +def __get_min_os_version(sdk): + if sdk == "macosx": + return None + if sdk in ("iphoneos", "iphonesimulator"): + return "13.0" + raise RuntimeError(f"Unsupported sdk: {sdk}") + + +def __get_min_os_version_cmd(sdk, min_os_version): + if min_os_version is None: + min_os_version = __get_min_os_version(sdk) + if min_os_version is not None: + return "-mios-version-min=" + min_os_version + return "" + + +def create_dylib(output, objects, arch, sdk="macosx", min_os_version=None): + """Create dynamic library. + + Parameters + ---------- + output : str + The target shared library. + + objects : list + List of object files. + + options : str + The additional options. + + arch : str + Target major architectures + + sdk : str + The sdk to be used. 
+ """ + clang = xcrun(["-sdk", sdk, "-find", "clang"]) + sdk_path = xcrun(["-sdk", sdk, "--show-sdk-path"]) + cmd = [clang] + cmd += ["-dynamiclib"] + cmd += ["-arch", arch] + cmd += ["-isysroot", sdk_path] + cmd += [__get_min_os_version_cmd(sdk, min_os_version)] + cmd += ["-o", output] + if isinstance(objects, str): + cmd += [objects] + else: + cmd += objects + + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = "Compilation error:\n" + msg += py_str(out) + raise RuntimeError(msg) + + +# assign so as default output format +create_dylib.output_format = "dylib" + + +def compile_metal(code, path_target=None, sdk="macosx", min_os_version=None): + """Compile metal with CLI tool from env. + + Parameters + ---------- + code : str + The cuda code. + + path_target : str, optional + Output file. + + sdk : str, optional + The target platform SDK. + + Return + ------ + metallib : bytearray + The bytearray of the metallib + """ + temp = utils.tempdir() + temp_code = temp.relpath("my_lib.metal") + temp_ir = temp.relpath("my_lib.air") + temp_target = temp.relpath("my_lib.metallib") + + with open(temp_code, "w") as out_file: + out_file.write(code) + file_target = path_target if path_target else temp_target + + # See: + # - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview # pylint: disable=line-too-long + # + # xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air + # xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib + min_target = __get_min_os_version_cmd(sdk, min_os_version) + if sdk == "macosx": + language_version = "-std=macos-metal2.3" + elif sdk in ("iphoneos", "iphonesimulator"): + language_version = "-std=ios-metal2.3" + else: + raise RuntimeError(f"Unsupported sdk: {sdk}") + cmd1 = ["xcrun", "-sdk", sdk, "metal", language_version, min_target, "-O3"] + cmd1 += ["-c", 
temp_code, "-o", temp_ir] + cmd2 = ["xcrun", "-sdk", sdk, "metallib"] + cmd2 += [temp_ir, "-o", file_target] + proc = subprocess.Popen( + " ".join(cmd1) + ";" + " ".join(cmd2), + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + (out, _) = proc.communicate() + if proc.returncode != 0: + sys.stderr.write("Compilation error:\n") + sys.stderr.write(py_str(out)) + sys.stderr.flush() + libbin = None + else: + libbin = bytearray(open(file_target, "rb").read()) + return libbin + + +def compile_coreml(model, model_name="main", out_dir="."): + """Compile coreml model and return the compiled model path.""" + mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel") + mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc") + metadata = {"inputs": list(model.input_description), "outputs": list(model.output_description)} + # Use the description field to send info to CoreML runtime + model.short_description = json.dumps(metadata) + model.save(mlmodel_path) + + res = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir]) + if not os.path.isdir(mlmodelc_path): + raise RuntimeError(f"Compile failed: {res}") + + return mlmodelc_path diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/error.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/error.py new file mode 100644 index 0000000000000000000000000000000000000000..6bf9b16850858493e7d6114548e6f2f56d0e28dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/error.py @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Structured error classes in TVM. + +Each error class takes an error message as its input. +See the example sections for suggested message conventions. +To make the code more readable, we recommended developers to +copy the examples and raise errors with the same message convention. + +.. note:: + + Please also refer to :ref:`error-handling-guide`. +""" +from tvm._ffi.base import TVMError, register_error + + +@register_error +class InternalError(TVMError): + """Internal error in the system. + + Examples + -------- + .. code :: c++ + + // Example code C++ + LOG(FATAL) << "InternalError: internal error detail."; + + .. code :: python + + # Example code in python + raise InternalError("internal error detail") + """ + + def __init__(self, msg): + super(InternalError, self).__init__(msg) + + +register_error("ValueError", ValueError) +register_error("TypeError", TypeError) +register_error("AttributeError", AttributeError) +register_error("KeyError", KeyError) +register_error("IndexError", IndexError) +register_error("AssertionError", AssertionError) + + +@register_error +class RPCError(TVMError): + """Error thrown by the remote server handling the RPC call.""" + + +@register_error +class RPCSessionTimeoutError(RPCError, TimeoutError): + """Error thrown by the remote server when the RPC session has expired.""" + + +@register_error +class OpError(TVMError): + """Base class of all operator errors in frontends.""" + + +@register_error +class OpNotImplemented(OpError, NotImplementedError): + """Operator is not implemented. + + Example + ------- + .. 
code:: python + + raise OpNotImplemented( + "Operator {} is not supported in {} frontend".format( + missing_op, frontend_name)) + """ + + +@register_error +class OpAttributeRequired(OpError, AttributeError): + """Required attribute is not found. + + Example + ------- + .. code:: python + + raise OpAttributeRequired( + "Required attribute {} not found in operator {}".format( + attr_name, op_name)) + """ + + +@register_error +class OpAttributeInvalid(OpError, AttributeError): + """Attribute value is invalid when taking in a frontend operator. + + Example + ------- + .. code:: python + + raise OpAttributeInvalid( + "Value {} in attribute {} of operator {} is not valid".format( + value, attr_name, op_name)) + """ + + +@register_error +class OpAttributeUnImplemented(OpError, NotImplementedError): + """Attribute is not supported in a certain frontend. + + Example + ------- + .. code:: python + + raise OpAttributeUnImplemented( + "Attribute {} is not supported in operator {}".format( + attr_name, op_name)) + """ + + +@register_error +class DiagnosticError(TVMError): + """Error diagnostics were reported during the execution of a pass. + + See the configured diagnostic renderer for detailed error information. + """ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/generic.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..7c46312c2ea59fbc7b66e3712419f0d6fdc570f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/generic.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Generic operators.""" +# pylint:disable=unused-wildcard-import, wildcard-import +from .tir.generic import * diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/parser.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..b79682d8907b158f89811780d070aea293fd9065 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/parser.py @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=invalid-name +"""The legacy TVM parser """ +from .ir.base import deprecated + +# pylint: disable=import-outside-toplevel + + +@deprecated("tvm.parser.parse", "tvm.relay.parse") +def parse(*args, **kwargs): + """Deprecated, use `tvm.relay.parse` instead""" + from tvm.relay import parse as _impl + + return _impl(*args, **kwargs) + + +@deprecated("tvm.parser.parse_expr", "tvm.relay.parse_expr") +def parse_expr(*args, **kwargs): + """Deprecated, use `tvm.relay.parse_expr` instead""" + from tvm.relay import parse_expr as _impl + + return _impl(*args, **kwargs) + + +@deprecated("tvm.parser.fromtext", "tvm.relay.fromtext") +def fromtext(*args, **kwargs): + """Deprecated, use `tvm.relay.fromtext` instead""" + from tvm.relay import fromtext as _impl + + return _impl(*args, **kwargs) + + +@deprecated("tvm.parser.SpanCheck", "tvm.relay.SpanCheck") +def SpanCheck(*args, **kwargs): + """Deprecated, use `tvm.relay.SpanCheck` instead""" + from tvm.relay import SpanCheck as _impl + + return _impl(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/support.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/support.py new file mode 100644 index 0000000000000000000000000000000000000000..a50a5e7b573281cc81534454128e42532056433f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/python/tvm/support.py @@ -0,0 +1,89 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Support infra of TVM.""" +import json +import textwrap +import ctypes +import os +import sys + +import tvm +import tvm._ffi +from .runtime.module import Module +from . import get_global_func + +tvm._ffi._init_api("support", __name__) + + +def libinfo(): + """Returns a dictionary containing compile-time info, including cmake flags and git commit hash + + Returns + ------- + info: Dict[str, str] + The dictionary of compile-time info. + """ + get_lib_info_func = get_global_func("support.GetLibInfo", allow_missing=True) + if get_lib_info_func is not None: + lib_info = get_lib_info_func() + if lib_info is None: + return {} + else: + return {} + return dict(lib_info.items()) + + +def describe(): + """ + Print out information about TVM and the current Python environment + """ + info = list((k, v) for k, v in libinfo().items()) + info = dict(sorted(info, key=lambda x: x[0])) + print("Python Environment") + sys_version = sys.version.replace("\n", " ") + uname = os.uname() + uname = f"{uname.sysname} {uname.release} {uname.version} {uname.machine}" + lines = [ + f"TVM version = {tvm.__version__}", + f"Python version = {sys_version} ({sys.maxsize.bit_length() + 1} bit)", + f"os.uname() = {uname}", + ] + print(textwrap.indent("\n".join(lines), prefix=" ")) + print("CMake Options:") + print(textwrap.indent(json.dumps(info, indent=2), prefix=" ")) + + +class FrontendTestModule(Module): + """A tvm.runtime.Module whose member functions are PackedFunc.""" + + def __init__(self, entry_name=None): + underlying_mod = get_global_func("testing.FrontendTestModule")() + handle 
= underlying_mod.handle + + # Set handle to NULL to avoid cleanup in c++ runtime, transferring ownership. + # Both cython and ctypes FFI use c_void_p, so this is safe to assign here. + underlying_mod.handle = ctypes.c_void_p(0) + + super(FrontendTestModule, self).__init__(handle) + if entry_name is not None: + self.entry_name = entry_name + + def add_function(self, name, func): + self.get_function("__add_function")(name, func) + + def __setitem__(self, key, value): + self.add_function(key, value) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/version.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/version.py new file mode 100644 index 0000000000000000000000000000000000000000..e25b954ea667b61aac2fb07df42e8dca9bb68a50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/tvm/version.py @@ -0,0 +1,232 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +This is the global script that set the version information of TVM. 
+This script runs and update all the locations that related to versions + +List of affected files: +- tvm-root/python/tvm/_ffi/libinfo.py +- tvm-root/include/tvm/runtime/c_runtime_api.h +- tvm-root/conda/recipe/meta.yaml +- tvm-root/web/package.json +""" +import os +import re +import argparse +import logging +import subprocess + +# Modify the following value during release +# --------------------------------------------------- +# Current version: +# We use the version of the incoming release for code +# that is under development. +# +# It is also fallback version to be used when --git-describe +# is not invoked, or when the repository does not present the +# git tags in a format that this script can use. +# +# Two tag formats are supported: +# - vMAJ.MIN.PATCH (e.g. v0.8.0) or +# - vMAJ.MIN.devN (e.g. v0.8.dev0) +__version__ = "0.17.dev0" + +# --------------------------------------------------- + +PROJ_ROOT = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) + + +def py_str(cstr): + return cstr.decode("utf-8") + + +def git_describe_version(): + """Get PEP-440 compatible public and local version using git describe. + + Returns + ------- + pub_ver: str + Public version. + + local_ver: str + Local version (with additional label appended to pub_ver). + + Notes + ----- + - We follow PEP 440's convention of public version + and local versions. + - Only tags conforming to vMAJOR.MINOR.REV (e.g. "v0.7.0") + are considered in order to generate the version string. + See the use of `--match` in the `git` command below. + + Here are some examples: + + - pub_ver = '0.7.0', local_ver = '0.7.0': + We are at the 0.7.0 release. + - pub_ver = '0.8.dev94', local_ver = '0.8.dev94+g0d07a329e': + We are at the 0.8 development cycle. + The current source contains 94 additional commits + after the most recent tag(v0.7.0), + the git short hash tag of the current commit is 0d07a329e. 
+ """ + cmd = [ + "git", + "describe", + "--tags", + "--match", + "v[0-9]*.[0-9]*.[0-9]*", + "--match", + "v[0-9]*.[0-9]*.dev[0-9]*", + ] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=PROJ_ROOT) + (out, _) = proc.communicate() + + if proc.returncode != 0: + msg = py_str(out) + if msg.find("not a git repository") != -1: + return __version__, __version__ + logging.warning("git describe: %s, use %s", msg, __version__) + return __version__, __version__ + describe = py_str(out).strip() + arr_info = describe.split("-") + + # Remove the v prefix, mainly to be robust + # to the case where v is not presented as well. + if arr_info[0].startswith("v"): + arr_info[0] = arr_info[0][1:] + + # hit the exact tag + if len(arr_info) == 1: + return arr_info[0], arr_info[0] + + if len(arr_info) != 3: + logging.warning("Invalid output from git describe %s", describe) + return __version__, __version__ + + dev_pos = arr_info[0].find(".dev") + + # Development versions: + # The code will reach this point in case it can't match a full release version, such as v0.7.0. + # + # 1. in case the last known label looks like vMAJ.MIN.devN e.g. v0.8.dev0, we use + # the current behaviour of just using vMAJ.MIN.devNNNN+gGIT_REV + if dev_pos != -1: + dev_version = arr_info[0][: arr_info[0].find(".dev")] + # 2. in case the last known label looks like vMAJ.MIN.PATCH e.g. 
v0.8.0 + # then we just carry on with a similar version to what git describe provides, which is + # vMAJ.MIN.PATCH.devNNNN+gGIT_REV + else: + dev_version = arr_info[0] + + pub_ver = "%s.dev%s" % (dev_version, arr_info[1]) + local_ver = "%s+%s" % (pub_ver, arr_info[2]) + return pub_ver, local_ver + + +# Implementations +def update(file_name, pattern, repl, dry_run=False): + update = [] + hit_counter = 0 + need_update = False + with open(file_name) as file: + for l in file: + result = re.findall(pattern, l) + if result: + assert len(result) == 1 + hit_counter += 1 + if result[0] != repl: + l = re.sub(pattern, repl, l) + need_update = True + print("%s: %s -> %s" % (file_name, result[0], repl)) + else: + print("%s: version is already %s" % (file_name, repl)) + + update.append(l) + if hit_counter != 1: + raise RuntimeError("Cannot find version in %s" % file_name) + + if need_update and not dry_run: + with open(file_name, "w") as output_file: + for l in update: + output_file.write(l) + + +def sync_version(pub_ver, local_ver, dry_run): + """Synchronize version.""" + # python uses the PEP-440: local version + update( + os.path.join(PROJ_ROOT, "python", "tvm", "_ffi", "libinfo.py"), + r"(?<=__version__ = \")[.0-9a-z\+]+", + local_ver, + dry_run, + ) + # Use public version for other parts for now + # Note that full git hash is already available in libtvm + # C++ header + update( + os.path.join(PROJ_ROOT, "include", "tvm", "runtime", "c_runtime_api.h"), + r'(?<=TVM_VERSION ")[.0-9a-z\+]+', + pub_ver, + dry_run, + ) + # conda + update( + os.path.join(PROJ_ROOT, "conda", "recipe", "meta.yaml"), + r"(?<=version = ')[.0-9a-z\+]+", + pub_ver, + dry_run, + ) + # web + # change to pre-release convention by npm + dev_pos = pub_ver.find(".dev") + npm_ver = pub_ver if dev_pos == -1 else "%s.0-%s" % (pub_ver[:dev_pos], pub_ver[dev_pos + 1 :]) + update( + os.path.join(PROJ_ROOT, "web", "package.json"), + r'(?<="version": ")[.0-9a-z\-\+]+', + npm_ver, + dry_run, + ) + + +def main(): + 
logging.basicConfig(level=logging.INFO) + parser = argparse.ArgumentParser(description="Detect and synchronize version.") + parser.add_argument( + "--print-version", + action="store_true", + help="Print version to the command line. No changes is applied to files.", + ) + parser.add_argument( + "--git-describe", + action="store_true", + help="Use git describe to generate development version.", + ) + parser.add_argument("--dry-run", action="store_true") + + opt = parser.parse_args() + pub_ver, local_ver = __version__, __version__ + if opt.git_describe: + pub_ver, local_ver = git_describe_version() + if opt.print_version: + print(local_ver) + else: + sync_version(pub_ver, local_ver, opt.dry_run) + + +if __name__ == "__main__": + main()